| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–54.1k | int64 0–699 | stringlengths 111–35.6k | int64 0–699 | int64 0–1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_2,
bloom,
bridgetower,
byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextv2,
cpm,
cpmant,
ctrl,
cvt,
data2vec,
deberta,
deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmv2,
layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longt5,
luke,
lxmert,
m2m_100,
marian,
markuplm,
mask2former,
maskformer,
mbart,
mbart50,
mega,
megatron_bert,
megatron_gpt2,
mgp_str,
mluke,
mobilebert,
mobilenet_v1,
mobilenet_v2,
mobilevit,
mobilevitv2,
mpnet,
mra,
mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_2,
speecht5,
splinter,
squeezebert,
swiftformer,
swin,
swin2sr,
swinv2,
switch_transformers,
t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wav2vec2,
wav2vec2_conformer,
wav2vec2_phoneme,
wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
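# Usage sketch (assumption: the public `transformers` API, not part of this
# file): these per-model packages are usually reached through the Auto
# classes rather than imported directly.
#
#     from transformers import AutoConfig, AutoModel
#     config = AutoConfig.from_pretrained("bert-base-uncased")
#     model = AutoModel.from_config(config)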
| 20 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens_to_keep overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
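# Illustrative sketch of how the warpers and processors above compose inside a
# sampling step. The function shape and the prng handling are assumptions of
# this example, not part of the test file.
def sample_next_token(input_ids, logits, cur_len, rng):
    processors = FlaxLogitsProcessorList(
        [
            FlaxTemperatureLogitsWarper(temperature=0.7),
            FlaxTopKLogitsWarper(top_k=50),
            FlaxTopPLogitsWarper(top_p=0.95),
        ]
    )
    # warp the raw logits, then sample one token id per batch row
    warped = processors(input_ids, logits, cur_len=cur_len)
    return jax.random.categorical(rng, warped, axis=-1)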
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
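# Background sketch (simplified assumption, not the real `_LazyModule`): the
# pattern above defers each heavy submodule import until first attribute
# access, roughly equivalent to a PEP 562 module-level __getattr__:
#
#     import importlib
#
#     def __getattr__(name):
#         for module_name, attrs in _import_structure.items():
#             if name in attrs:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")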
| 21 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 29 | 0 |
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return book data from the Open Library API for the given olid."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
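# Example session (requires network access; the title shown is what Open
# Library returns for this ISBN at the time of writing, so treat it as
# illustrative):
#
#     >>> book = summarize_book(get_openlibrary_data("isbn/0140328726"))
#     >>> book["Title"]
#     'Matilda'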
| 22 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
if __name__ == "__main__":
    main()
| 29 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops_cpu(self):
        debug_launcher(test_ops.main)
| 23 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
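# Programmatic equivalent of the CLI above (paths are placeholders):
#
#     model = convert_fairseq_mbart_checkpoint_from_disk(
#         "/path/to/model.pt", hf_config_path="facebook/mbart-large-cc25"
#     )
#     model.save_pretrained("/path/to/mbart-hf")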
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor step (explicit Euler), then trapezoidal corrector (Heun's method)
        y_get = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_get))
        )
        x += step_size
    return y
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 25 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
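# Minimal inference sketch mirroring test_full_loop_no_noise above; `model` is
# assumed to be a noise-prediction network with the same call signature as
# `self.dummy_model()`:
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in reversed(range(scheduler.config.num_train_timesteps)):
#         residual = model(sample, t)                               # predicted noise
#         sample = scheduler.step(residual, t, sample).prev_sample  # x_t -> x_{t-1}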
| 29 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import UperNetForSemanticSegmentation
    from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
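# Follow-up sketch (not part of the test): collapsing the
# (1, num_labels, 512, 512) logits above into a per-pixel class map:
#
#     seg_map = outputs.logits.argmax(dim=1)[0]  # (512, 512) tensor of label ids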
| 26 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
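# Hypothetical instantiation of the arguments class above; all field values
# are placeholders:
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=64,
#         generation_num_beams=4,
#     )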
| 29 | 0 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 27 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(
    url,
    temp_file,
    proxies=None,
    resume_size=0,
    user_agent=None,
):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    # sha256 is hashlib.sha256, imported at the top of this file.
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
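# Usage sketch (url and etag are made-up values): the cache key is content-addressed,
#   url_to_filename("https://example.com/weights.h5", etag='"abc"')
# returns "<sha256(url)>.<sha256(etag)>.h5", so a new ETag for the same URL lands in a
# fresh cache entry instead of overwriting the previous download.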
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
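# Hypothetical end-to-end usage (the URL is illustrative only):
#   local_folder = cached_path("https://example.com/archive.zip", extract_compressed_file=True)
# downloads once into the cache directory, extracts to "archive-zip-extracted/", and on
# later calls returns the extracted folder without touching the network.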
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            # Note: eval trusts the file contents entirely; only use with local,
            # trusted data files.
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_checkpoint(ckp_url):
    # Download the pickled checkpoint if it is not already in the working directory.
    fn = ckp_url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(ckp_url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            # BatchNorm state dicts also need a num_batches_tracked entry.
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    # Assumes the demo notebook sits one directory above this file (the original
    # code used a module-level PATH constant for the same purpose).
    print(f"{os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
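# Example: with 5 images and batch=2, chunk() lazily yields slices of length 2, 2 and 1:
#   for image_batch in chunk(images, batch=2):
#       ...  # process one mini-batch at a time; nothing is copied until consumed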
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
            train_extension = data_args.train_file.split(".")[-1]
            test_extension = data_args.test_file.split(".")[-1]
            assert (
                test_extension == train_extension
            ), "`test_file` should have the same extension (csv or json) as `train_file`."
            data_files["test"] = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
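    # Illustrative note (example values made up): tab_fact serializes each table as a
    # "#"-delimited string such as "city#population\nparis#2.2\nberlin#3.6";
    # _convert_table_text_to_pandas turns the first line into the DataFrame header and
    # the remaining lines into rows, which TapexTokenizer then linearizes together
    # with the statement.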
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True  # field name assumed; the obfuscated source only preserved the value
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # The nargs-style CLI list must convert into a dict with properly typed values.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
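# Sketch of the expected conversion (inferred from the asserts above, not from the
# accelerate docs): the success list parses to roughly
#   {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#    "learning_rate": 5e-5, "max_steps": 50.5}
# while the fail list mixes bare boolean flags ("--do_train", "--do_predict") with
# "--flag value" pairs, which _convert_nargs_to_dict rejects with a ValueError.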
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
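    # Sketch of the assumed shift_tokens_right behavior (see modeling_flax_t5): the
    # decoder inputs are the labels shifted one position to the right, with
    # config.decoder_start_token_id prepended and the last token dropped, so the
    # model learns to predict labels[t] from labels[:t].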
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"
}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
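# Usage sketch (illustrative): attribute_map aliases the generic config names used
# across the library, so for `config = OpenAIGPTConfig()` the attribute
# `config.hidden_size` resolves to `config.n_embd` (768 by default) and
# `config.max_position_embeddings` to `config.n_positions` (512).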
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
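# Worked example: generate_pascal_triangle_optimized(4) pads the previous row with
# zeros before summing neighbours -- [0, 1, 0] -> [1, 1], [0, 1, 1, 0] -> [1, 2, 1],
# [0, 1, 2, 1, 0] -> [1, 3, 3, 1] -- producing [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]].
# Only the distinct first half of each row is computed, then mirrored, exploiting
# the symmetry of Pascal's triangle.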
def benchmark():
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
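# Note on the pattern above (illustrative): replacing sys.modules[__name__] with a
# _LazyModule means importing this package only registers the names listed in
# _import_structure; the heavy torch/TF submodules are imported on first attribute
# access, e.g. the first time `XLNetModel` is actually looked up.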
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
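# Illustrative check (values follow directly from the branches above): for
# model_name="focalnet-tiny" this returns a config with embed_dim=96,
# depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3]
# and the 1000 ImageNet-1k labels; "-lrf" variants bump focal_levels to 3.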
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
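    # Example invocation of this conversion script (script filename and output path are
    # illustrative, the flags match the argparse definitions above):
    #   python convert_focalnet_to_hf.py --model_name focalnet-tiny \
    #       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub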
| 29 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( BaseImageProcessor ):
"""simple docstring"""
    model_input_names = ['''pixel_values''']
def __init__( self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = PIL.Image.BICUBIC , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = 1 / 2_5_5 , lowerCamelCase_ = True , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
UpperCamelCase = get_size_dict(lowerCamelCase_)
UpperCamelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCamelCase = get_size_dict(lowerCamelCase_ , param_name='''crop_size''')
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_center_crop
UpperCamelCase = crop_size
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
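    # Net effect of the defaults above: resize to 256x256 with bicubic resampling,
    # center-crop to 224x224, rescale pixel values by 1/255, then normalize with the
    # ImageNet-standard mean and std.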
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = PIL.Image.BICUBIC , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
UpperCamelCase = get_size_dict(lowerCamelCase_)
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
return resize(
lowerCamelCase_ , size=(size['''height'''], size['''width''']) , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
UpperCamelCase = get_size_dict(lowerCamelCase_)
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
return center_crop(lowerCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> Tuple:
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_=None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> PIL.Image.Image:
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(lowerCamelCase_)
UpperCamelCase = crop_size if crop_size is not None else self.crop_size
UpperCamelCase = get_size_dict(lowerCamelCase_ , param_name='''crop_size''')
UpperCamelCase = make_list_of_images(lowerCamelCase_)
if not valid_images(lowerCamelCase_):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(lowerCamelCase_) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_) for image in images]
if do_center_crop:
UpperCamelCase = [self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_) for image in images]
UpperCamelCase = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_) for image in images]
UpperCamelCase = {'''pixel_values''': images}
        return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_)
 | 34 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens ):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False )
        with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
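        # Matching above is greedy longest-prefix-first: "unwanted" splits into
        # "un" + "##want" + "##ed" because "un" is the longest vocab prefix, while
        # "unwantedX" collapses to "[UNK]" once the trailing "X" cannot be matched.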
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 | 0 |
from cv2 import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img ):
    '''simple docstring'''
    # getting number of pixels in the image
    pixel_h , pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [2_5_5, 2_5_5, 2_5_5] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    img = imread('image_data/lena.jpg', 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow('negative of original image', img)
    waitKey(0)
    destroyAllWindows()
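# Note: the per-pixel Python loop above is O(height * width); with numpy the whole
# negative can be computed in one step as `255 - img`, which is exactly what the loop
# does element-wise.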
| 35 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself         This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit           This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities      These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=True ,lowerCAmelCase__=False ,lowerCAmelCase__="dummy_doc" ):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def lowercase ( key_lines ):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('''#''' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
break
else:
break
return has_gold_parse
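# Illustration: in the metric docstring's example lines above, column 6 of the first
# row is "(TOP(S(VP*", so this check reports gold parse annotation as present; a file
# whose sixth column is always "-" yields False.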
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
| 29 | 0 |
class TrieNode:
    '''simple docstring'''
    def __init__( self ):
        '''simple docstring'''
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many( self ,words ):
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self ,word ):
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find( self ,word ):
        '''simple docstring'''
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete( self ,word ):
        '''simple docstring'''
        def _delete(curr ,word ,index ) -> bool:
            if index == len(word ):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes ) == 0
            char = word[index]
            char_node = curr.nodes.get(char )
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node ,word ,index + 1 )
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes ) == 0
            return delete_curr
        _delete(self ,word ,0 )
def print_words(node: TrieNode , word: str ) -> None:
    '''simple docstring'''
    if node.is_leaf:
        print(word , end=""" """ )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie() -> bool:
    '''simple docstring'''
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
    root.insert_many(words )
    # print_words(root, "")
    assert all(root.find(word ) for word in words )
    assert root.find("""banana""" )
    assert not root.find("""bandanas""" )
    assert not root.find("""apps""" )
    assert root.find("""apple""" )
    assert root.find("""all""" )
    root.delete("""all""" )
    assert not root.find("""all""" )
    root.delete("""banana""" )
    assert not root.find("""banana""" )
    assert root.find("""bananas""" )
    return True
def print_results(msg: str , passes: bool ) -> None:
    '''simple docstring'''
    print(str(msg ) , """works!""" if passes else """doesn't work :(""" )
def pytests() -> None:
    '''simple docstring'''
    assert test_trie()
def main() -> None:
    '''simple docstring'''
    print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
    main()
| 36 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
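        # Concretely, GPT-2's model config carries eos_token_id=50256, whereas a fresh
        # GenerationConfig defaults eos_token_id to None, hence the inequality asserted above.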
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
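        # In short: `update(**kwargs)` mutates recognized attributes in place
        # (max_new_tokens here) and hands back the unrecognized remainder
        # ({"foo": "bar"}) for the caller to deal with.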
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
| 29 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
"""simple docstring"""
def __init__( self : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : int=0.2 , lowerCamelCase__ : Dict=0.2 ):
a__ : Optional[int] = bp_numa
a__ : Tuple = bp_numa
a__ : Optional[Any] = bp_numa
a__ : str = conva_get[:2]
a__ : Optional[int] = conva_get[2]
a__ : Any = size_pa
a__ : List[str] = rate_w
a__ : str = rate_t
a__ : Optional[int] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
a__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a__ : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a__ : List[str] = -2 * np.random.rand(self.conva[1] ) + 1
a__ : List[str] = -2 * np.random.rand(self.num_bpa ) + 1
a__ : List[Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Optional[Any] ):
# save model dict with pickle
a__ : int = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(lowerCamelCase__ , "wb" ) as f:
pickle.dump(lowerCamelCase__ , lowerCamelCase__ )
print(f'''Model saved: {save_path}''' )
@classmethod
def _UpperCamelCase( cls : Tuple , lowerCamelCase__ : Tuple ):
# read saved model
with open(lowerCamelCase__ , "rb" ) as f:
a__ : Tuple = pickle.load(lowerCamelCase__ ) # noqa: S301
a__ : List[str] = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
a__ : Optional[Any] = model_dic.get("size_pooling1" )
a__ : Optional[Any] = model_dic.get("num_bp1" )
a__ : Tuple = model_dic.get("num_bp2" )
a__ : int = model_dic.get("num_bp3" )
a__ : Tuple = model_dic.get("rate_weight" )
a__ : Optional[Any] = model_dic.get("rate_thre" )
# create model instance
a__ : Tuple = CNN(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# modify model parameter
a__ : Tuple = model_dic.get("w_conv1" )
a__ : int = model_dic.get("wkj" )
a__ : List[str] = model_dic.get("vji" )
a__ : Optional[int] = model_dic.get("thre_conv1" )
a__ : Optional[int] = model_dic.get("thre_bp2" )
a__ : Optional[int] = model_dic.get("thre_bp3" )
return conv_ins
def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ):
return 1 / (1 + np.exp(-1 * x ))
def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ):
return round(lowerCamelCase__ , 3 )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any ):
# convolution process
a__ : Any = convs[0]
a__ : str = convs[1]
a__ : int = np.shape(lowerCamelCase__ )[0]
# get the data slice of original image data, data_focus
a__ : Optional[int] = []
for i_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase__ ):
for j_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase__ ):
a__ : List[str] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowerCamelCase__ )
# calculate the feature map of every single kernel, and saved as list of matrix
a__ : str = []
a__ : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowerCamelCase__ ):
a__ : Optional[Any] = []
for i_focus in range(len(lowerCamelCase__ ) ):
a__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowerCamelCase__ ) )
a__ : Optional[int] = np.asmatrix(lowerCamelCase__ ).reshape(
lowerCamelCase__ , lowerCamelCase__ )
data_featuremap.append(lowerCamelCase__ )
        # expanding the data slice to one dimension
a__ : Union[str, Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowerCamelCase__ ) )
a__ : Union[str, Any] = np.asarray(lowerCamelCase__ )
return focus_list, data_featuremap
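    # The feature-map side length above follows the usual valid-convolution formula
    # (size_data - size_conv) / conv_step + 1: e.g. a 28x28 input with a 5x5 kernel
    # and stride 1 yields 24x24 maps.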
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int]="average_pool" ):
# pooling process
a__ : Union[str, Any] = len(featuremaps[0] )
a__ : Union[str, Any] = int(size_map / size_pooling )
a__ : List[str] = []
for i_map in range(len(lowerCamelCase__ ) ):
a__ : str = featuremaps[i_map]
a__ : Tuple = []
for i_focus in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
for j_focus in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
a__ : Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowerCamelCase__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowerCamelCase__ ) )
a__ : List[str] = np.asmatrix(lowerCamelCase__ ).reshape(lowerCamelCase__ , lowerCamelCase__ )
featuremap_pooled.append(lowerCamelCase__ )
return featuremap_pooled
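    # Pooling arithmetic above: a size_map x size_map feature map shrinks to
    # (size_map / size_pooling) per side, each patch reduced with np.average
    # ("average_pool") or np.max ("max_pooling").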
def _UpperCamelCase( self : Any , lowerCamelCase__ : str ):
        # expanding three-dimensional data to a one-dimensional list
a__ : Optional[int] = []
for i in range(len(lowerCamelCase__ ) ):
a__ : Dict = np.shape(data[i] )
a__ : Union[str, Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
a__ : List[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(lowerCamelCase__ )
a__ : Dict = np.asarray(lowerCamelCase__ )
return data_expanded
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Tuple ):
# expanding matrix to one dimension list
a__ : str = np.asarray(lowerCamelCase__ )
a__ : Optional[int] = np.shape(lowerCamelCase__ )
a__ : Union[str, Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _UpperCamelCase( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : str , lowerCamelCase__ : Any ):
a__ : int = []
a__ : int = 0
for i_map in range(lowerCamelCase__ ):
a__ : List[Any] = np.ones((size_map, size_map) )
for i in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
for j in range(0 , lowerCamelCase__ , lowerCamelCase__ ):
a__ : Union[str, Any] = pd_pool[
i_pool
]
a__ : str = i_pool + 1
a__ : Any = np.multiply(
lowerCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowerCamelCase__ )
return pd_all
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]=bool ):
        # model training
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(lowerCamelCase__ )) )
print((" - - Shape: Teach_Data ", np.shape(lowerCamelCase__ )) )
a__ : str = 0
a__ : List[str] = []
a__ : int = 10_000
while rp < n_repeat and mse >= error_accuracy:
a__ : Optional[int] = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(lowerCamelCase__ ) ):
# print('------------Learning Image: %d--------------'%p)
a__ : Optional[Any] = np.asmatrix(datas_train[p] )
a__ : str = np.asarray(datas_teach[p] )
a__, a__ : Dict = self.convolute(
lowerCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : int = self.pooling(lowerCamelCase__ , self.size_poolinga )
a__ : Dict = np.shape(lowerCamelCase__ )
a__ : List[str] = self._expand(lowerCamelCase__ )
a__ : Dict = data_bp_input
a__ : Dict = np.dot(lowerCamelCase__ , self.vji.T ) - self.thre_bpa
a__ : Optional[int] = self.sig(lowerCamelCase__ )
a__ : int = np.dot(lowerCamelCase__ , self.wkj.T ) - self.thre_bpa
a__ : int = self.sig(lowerCamelCase__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
a__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(lowerCamelCase__ , (1 - bp_outa) ) )
a__ : Optional[Any] = np.multiply(
np.dot(lowerCamelCase__ , self.wkj ) , np.multiply(lowerCamelCase__ , (1 - bp_outa) ) )
a__ : Optional[Any] = np.dot(lowerCamelCase__ , self.vji )
a__ : str = pd_i_all / (self.size_poolinga * self.size_poolinga)
a__ : Optional[Any] = pd_conva_pooled.T.getA().tolist()
a__ : Dict = self._calculate_gradient_from_pool(
lowerCamelCase__ , lowerCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
a__ : List[str] = self._expand_mat(pd_conva_all[k_conv] )
a__ : Any = self.rate_weight * np.dot(lowerCamelCase__ , lowerCamelCase__ )
a__ : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
a__ : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
a__ : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a__ : str = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a__ : Optional[int] = self.thre_bpa - pd_k_all * self.rate_thre
a__ : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
a__ : str = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a__ : str = rp + 1
a__ : Tuple = error_count / patterns
all_mse.append(lowerCamelCase__ )
def draw_error():
a__ : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowerCamelCase__ , "+-" )
plt.plot(lowerCamelCase__ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(lowerCamelCase__ , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : int ):
# model predict
a__ : Optional[int] = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(lowerCamelCase__ )) )
for p in range(len(lowerCamelCase__ ) ):
a__ : str = np.asmatrix(datas_test[p] )
a__, a__ : Tuple = self.convolute(
lowerCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : Optional[int] = self.pooling(lowerCamelCase__ , self.size_poolinga )
a__ : Any = self._expand(lowerCamelCase__ )
a__ : Any = data_bp_input
a__ : str = bp_outa * self.vji.T - self.thre_bpa
a__ : Tuple = self.sig(lowerCamelCase__ )
a__ : Any = bp_outa * self.wkj.T - self.thre_bpa
a__ : List[str] = self.sig(lowerCamelCase__ )
produce_out.extend(bp_outa.getA().tolist() )
a__ : List[str] = [list(map(self.do_round , lowerCamelCase__ ) ) for each in produce_out]
return np.asarray(lowerCamelCase__ )
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any ):
        # return the image data after the convolution process so we can inspect it
a__ : List[Any] = np.asmatrix(lowerCamelCase__ )
a__, a__ : List[Any] = self.convolute(
lowerCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : Any = self.pooling(lowerCamelCase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 37 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
    a__: str = field(default='Translation' , init=False , repr=False )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
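# Usage sketch (an illustration): Translation(languages=["en", "fr"]) describes one
# string per language, so an encoded example is simply {"en": "the cat", "fr": "le chat"}.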
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
    a__: str = field(default='TranslationVariableLanguages' , init=False , repr=False )
    def __post_init__( self ):
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example( self , translation_dict ):
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
    def flatten( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
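# Usage sketch (added for illustration; the result shown is hand-computed from
# the sorting logic above, not library output):
#
#   feature = TranslationVariableLanguages(languages=["en", "fr"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   # -> {"language": ("en", "fr", "fr"),
#   #     "translation": ("the cat", "la chatte", "le chat")}
#
# Note that "la chatte" sorts before "le chat", since the tuples are ordered
# by (language, text).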
| 29 | 0 |
'''simple docstring'''
import os
def solution() -> int:
    '''simple docstring'''
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
        maximum = 0
        # right
        for i in range(20 ):
            for j in range(17 ):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp
        # down
        for i in range(17 ):
            for j in range(20 ):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp
        # diagonal 1
        for i in range(17 ):
            for j in range(17 ):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp
        # diagonal 2
        for i in range(17 ):
            for j in range(3 , 20 ):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
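# Note on the loop bounds above (added for clarity): the grid is 20x20 and each
# product spans four cells, so a run must start at an index that leaves room
# for three more cells. Horizontal and vertical scans therefore use range(17)
# on the moving axis, and the anti-diagonal scan starts at column 3
# (range(3, 20)) so that j - 3 stays in bounds.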
| 38 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_neo"""] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_gpt_neo"""] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
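# Design note (added; not part of the upstream file): this is the standard
# `transformers` lazy-import pattern. `_import_structure` only lists symbol
# names, so `import transformers` stays cheap; the `_LazyModule` installed via
# `sys.modules[__name__]` performs the heavy torch/flax submodule imports the
# first time an attribute such as `GPTNeoModel` is actually accessed.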
| 29 | 0 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = "laptop" ):
snake_case_ = F'''https://www.amazon.in/laptop/s?k={product}'''
snake_case_ = {
'''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
snake_case_ = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ ).text )
# Initialize a Pandas dataframe with the column titles
snake_case_ = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
snake_case_ = item.ha.text
snake_case_ = '''https://www.amazon.in/''' + item.ha.a['''href''']
snake_case_ = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
snake_case_ = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
snake_case_ = '''Not available'''
try:
snake_case_ = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
snake_case_ = ''''''
try:
snake_case_ = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 100 )
except ValueError:
snake_case_ = float('''nan''' )
except AttributeError:
pass
snake_case_ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case_ = ''' '''
snake_case_ = ''' '''
data_frame.index += 1
return data_frame
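# Worked example of the discount formula above (illustrative numbers, not
# scraped data): for an MRP string "₹1,000" and a price string "₹750",
# strip("₹") and replace(",", "") normalise them to 1000.0 and 750.0, giving
# (1000 - 750) / 1000 * 100 == 25.0, i.e. a 25% discount.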
if __name__ == "__main__":
    product = '''headphones'''
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""") | 39 |
"""simple docstring"""
import math
def prime_sieve( n ):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,n ,2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
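# Quick sanity check of the sieve above (added; illustrative only):
# prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]. Even numbers are
# never visited — the result list is seeded with 2 and only odd indices are
# marked and collected.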
def solution( limit = 999_966_663_333 ):
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 29 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file( filename , start_prompt , end_prompt ):
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task( task_guide ):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ):
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
    new_list = get_model_list_for_task(task_guide )
if current_list != new_list:
if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
' to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 40 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source ,target ):
    return (abs(source - target ) / target) < 0.01
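# Example of the 1% tolerance above (added; illustrative numbers):
# is_apercent_close(2_351_000 ,2_351_563 ) is True, since the relative error
# |2_351_000 - 2_351_563| / 2_351_563 is roughly 0.024%.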
@pytest.mark.integration
def test_test_command( dataset_dir ):
    args = _TestCommandArgs(dataset=dataset_dir ,all_configs=True ,save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    readme_path = os.path.join(dataset_dir ,'''README.md''' )
    assert os.path.exists(readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir )
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos['''default'''] ,key ), getattr(expected_dataset_infos['''default'''] ,key )
        if key == "num_bytes":
            assert is_apercent_close(result ,expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
        else:
            assert result == expected
| 29 | 0 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ):
"""simple docstring"""
__lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' )
__lowercase = str(A__ )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
__lowercase = model.half()
__lowercase = AutoTokenizer.from_pretrained(A__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowercase = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
__lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
__lowercase = [prefix + text for text in examples_chunk]
__lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
__lowercase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
__lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowercase = int(time.time() - start_time ) # seconds
__lowercase = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate( verbose=True ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' , type=str , help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' , type=str , help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' , type=str , required=False , help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' , type=str , required=False , default='''metrics.json''' , help='''where to save metrics''' )
    parser.add_argument('''--device''' , type=str , required=False , default=DEFAULT_DEVICE , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=str , required=False , default=None , help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--task''' , type=str , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=int , default=8 , required=False , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=int , default=-1 , required=False , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=str , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(F"parsed the following generate kwargs: {parsed_args}" )
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , '''w''' ) )
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
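# Worked example of the WER formula (hand computation matching the doctest
# above, added for clarity): for the pair reference "there is another one" vs.
# prediction "there is an other sample", the alignment gives S=2, D=0, I=1
# against N=4 reference words. Combined with the first pair ("this is the
# reference" vs. "this is the prediction": S=1, N=4), the iterative path
# returns (1 + 3) / (4 + 4) = 0.5.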
| 29 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
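        # Worked example of the two formulas above (added), using this tester's
        # defaults (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]):
        # (32 // 2) ** 2 = 256 patches, downsampled by 4 ** 2 = 16 across the
        # stages -> expected_seq_len = 16, and expected_dim = 16 * 2 ** 2 = 64.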
    def create_and_check_backbone( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# FocalNet has a different seq_length
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCamelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = reshaped_hidden_states[0].shape
lowerCamelCase_ = (
reshaped_hidden_states[0].view(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCamelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCamelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCamelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = FocalNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ ,lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=SCREAMING_SNAKE_CASE_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FocalNetModelTester(self )
| 42 |
"""simple docstring"""
def reverse_words( input_str ):
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
from __future__ import annotations
import math
def minimax( depth , node_index , is_max , scores , height ):
    """simple docstring"""
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main():
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print('''Optimal value : ''' , end='''''' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
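# Hand trace of main() above (added for clarity): with 8 leaf scores the tree
# height is log2(8) = 3. The depth-2 maxima are max(90, 23)=90, max(6, 33)=33,
# max(21, 65)=65 and max(123, 34423)=34423; the depth-1 minima are
# min(90, 33)=33 and min(65, 34423)=65; the maximising root picks
# max(33, 65) = 65, so the program prints "Optimal value : 65".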
| 43 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
    def _get_uniform_logits( self , batch_size , length ):
        scores = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
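        # Why those expected values (added note): with top_p=0.8 the warper keeps
        # the smallest set of tokens whose sorted probabilities reach 0.8 —
        # row 0 keeps 0.5 + 0.3 = 0.8 (two tokens), row 1 keeps
        # 0.3 + 0.3 + 0.25 = 0.85 (three tokens); the filtered logits are -inf,
        # and exp(-inf) = 0.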
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
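# A minimal standalone sketch of what FlaxTemperatureLogitsWarper does (added
# illustration, not the library implementation): logits are divided by the
# temperature before the softmax, so T < 1 sharpens and T > 1 flattens the
# distribution.
#
#   import jax
#   import jax.numpy as jnp
#   logits = jnp.array([1.0, 2.0, 4.0])
#   sharp = jax.nn.softmax(logits / 0.5)    # peakier than jax.nn.softmax(logits)
#   smooth = jax.nn.softmax(logits / 1.3)   # flatter than jax.nn.softmax(logits)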
| 29 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : List[str] = None
if self.use_input_mask:
_lowerCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : Dict = None
if self.use_token_type_ids:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : Any = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : List[Any] = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : List[str] = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : Any = RoFormerConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=__A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self : Union[str, Any],__A : Dict,__A : Optional[Any],__A : Union[str, Any],__A : Dict,__A : int,__A : Union[str, Any],__A : List[Any] ):
_lowerCamelCase : Any = TFRoFormerModel(config=__A )
_lowerCamelCase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_lowerCamelCase : Dict = [input_ids, input_mask]
_lowerCamelCase : Optional[int] = model(__A )
_lowerCamelCase : List[Any] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : List[str],__A : Union[str, Any],__A : Tuple,__A : Any,__A : Optional[Any],__A : int,__A : Optional[Any],__A : int ):
_lowerCamelCase : Tuple = True
_lowerCamelCase : Optional[int] = TFRoFormerForCausalLM(config=__A )
_lowerCamelCase : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : Union[str, Any] = model(__A )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ),[self.batch_size, self.seq_length, self.vocab_size] )
def lowerCamelCase_ ( self : str,__A : Any,__A : Tuple,__A : str,__A : int,__A : Any,__A : Any,__A : str ):
_lowerCamelCase : Optional[int] = TFRoFormerForMaskedLM(config=__A )
_lowerCamelCase : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : List[str] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any,__A : int,__A : Dict,__A : List[str],__A : Tuple,__A : List[str],__A : int,__A : Union[str, Any] ):
_lowerCamelCase : int = self.num_labels
_lowerCamelCase : int = TFRoFormerForSequenceClassification(config=__A )
_lowerCamelCase : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Tuple,__A : Optional[Any],__A : Optional[Any],__A : int,__A : Dict,__A : Union[str, Any],__A : Optional[int],__A : Optional[int] ):
_lowerCamelCase : Optional[Any] = self.num_choices
_lowerCamelCase : Optional[int] = TFRoFormerForMultipleChoice(config=__A )
_lowerCamelCase : int = tf.tile(tf.expand_dims(__A,1 ),(1, self.num_choices, 1) )
_lowerCamelCase : int = tf.tile(tf.expand_dims(__A,1 ),(1, self.num_choices, 1) )
_lowerCamelCase : Any = tf.tile(tf.expand_dims(__A,1 ),(1, self.num_choices, 1) )
_lowerCamelCase : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_lowerCamelCase : Dict = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : Optional[Any],__A : Union[str, Any],__A : List[Any],__A : Union[str, Any],__A : Optional[int],__A : Any,__A : Any,__A : Optional[Any] ):
_lowerCamelCase : str = self.num_labels
_lowerCamelCase : Optional[int] = TFRoFormerForTokenClassification(config=__A )
_lowerCamelCase : Optional[Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : Optional[int] = model(__A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : List[str],__A : Union[str, Any],__A : Dict,__A : Optional[Any],__A : List[Any],__A : int,__A : Tuple,__A : Optional[int] ):
_lowerCamelCase : Any = TFRoFormerForQuestionAnswering(config=__A )
_lowerCamelCase : int = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
_lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[str],__A : List[str],__A : List[Any],__A : int,__A : str,__A : Optional[int] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowerCamelCase_ ( self : Optional[int] ):
_lowerCamelCase : Dict = TFRoFormerModelTester(self )
_lowerCamelCase : List[Any] = ConfigTester(self,config_class=__A,hidden_size=3_7 )
def lowerCamelCase_ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__A )
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : List[Any] = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
self.assertIsNotNone(__A )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[Any] = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
_lowerCamelCase : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCamelCase : List[str] = model(__A )[0]
# TODO Replace vocab size
_lowerCamelCase : List[Any] = 5_0_0_0_0
_lowerCamelCase : List[Any] = [1, 6, vocab_size]
self.assertEqual(output.shape,__A )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_lowerCamelCase : Any = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3],__A,atol=1e-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = 1E-4
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = tf.constant([[4, 1_0]] )
_lowerCamelCase : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6,embedding_dim=6 )
_lowerCamelCase : int = emba(input_ids.shape )
_lowerCamelCase : Union[str, Any] = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(__A,__A,atol=self.tolerance )
def lowerCamelCase_ ( self : Dict ):
_lowerCamelCase : Any = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_lowerCamelCase : Tuple = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2,embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
_lowerCamelCase : List[str] = emba.weight[:3, :5]
tf.debugging.assert_near(__A,__A,atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = 1E-4
def lowerCamelCase_ ( self : int ):
# 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 ,dtype=tf.float32 ) ,shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        key_layer = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 ,dtype=tf.float32 ) ,shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 ,embedding_dim=6_4 )
        sinusoidal_pos = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos ,query_layer ,key_layer )
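        # apply_rotary_position_embeddings rotates each (even, odd) feature
        # pair of query and key by an angle that grows with position, so the
        # resulting attention logits depend on relative positions; the
        # constants below are the expected rotated slices for the ramp inputs.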
_lowerCamelCase : Union[str, Any] = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_lowerCamelCase : Dict = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8],__A,atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8],__A,atol=self.tolerance ) | 44 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration( func ):
    def wrapper(*args ,**kwargs ):
        starttime = timeit.default_timer()
        func(*args ,**kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
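# Hypothetical usage sketch of the decorator above (the name get_duration
# follows the upstream benchmark script): a wrapped function returns its
# wall-clock duration in seconds instead of its result.
# @get_duration
# def bench():
#     return sum(range(1_000_000))
# elapsed = bench()  # -> float seconds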
def generate_examples( features ,num_examples=100 ,seq_shapes=None ):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v ,_ArrayXD ):
                example[k] = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v ,datasets.Value ):
                if v.dtype == "string":
                    example[k] = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    example[k] = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
            elif isinstance(v ,datasets.Sequence ):
                while isinstance(v ,datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape ).astype(v.dtype )
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset( dataset_path ,features ,num_examples=100 ,seq_shapes=None ):
    dummy_data = generate_examples(features ,num_examples=num_examples ,seq_shapes=seq_shapes )
    with ArrowWriter(features=features ,path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples , num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
    dataset = datasets.Dataset.from_file(filename=dataset_path ,info=datasets.DatasetInfo(features=features ) )
return dataset
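# Minimal benchmark sketch using the helpers above; the feature names are
# illustrative, not from the original script.
# import os, tempfile
# feats = datasets.Features({"text": datasets.Value("string"), "vec": datasets.Sequence(datasets.Value("float32"))})
# with tempfile.TemporaryDirectory() as tmp_dir:
#     ds = generate_example_dataset(os.path.join(tmp_dir, "bench.arrow"), feats,
#                                   num_examples=100, seq_shapes={"vec": (128,)})
#     print(len(ds))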
| 29 | 0 |
UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def A ( lowercase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(lowercase__ , lowercase__ ):
UpperCamelCase__ :Dict = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(lowercase__ )
UpperCamelCase__ :Any = """""".join(bin(lowercase__ )[2:].zfill(8 ) for byte in data )
UpperCamelCase__ :Optional[Any] = len(lowercase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCamelCase__ :int = b"""=""" * ((6 - len(lowercase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowercase__ ) % 6)
else:
UpperCamelCase__ :List[Any] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowercase__ ) , 6 ) ).encode()
+ padding
)
def base64_decode( encoded_data: str ) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            """argument should be a bytes-like object or ASCII string, """
            f"""not '{encoded_data.__class__.__name__}'"""
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("""utf-8""" )
        except UnicodeDecodeError:
            raise ValueError("""base64 encoded data should only contain ASCII characters""" )
    padding = encoded_data.count("""=""" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
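# Round-trip sketch for the two helpers above; the expected values match the
# standard library's base64 module.
# >>> base64_encode(b"Hello World!")
# b'SGVsbG8gV29ybGQh'
# >>> base64_decode("SGVsbG8gV29ybGQh")
# b'Hello World!'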
if __name__ == "__main__":
import doctest
doctest.testmod() | 45 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def lowercase ( ):
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 0 |
"""simple docstring"""
def jaro_winkler( stra , strb ) -> float:
    '''simple docstring'''
    def get_matched_characters( _stra , _strb ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f"""{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"""
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
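# Worked example: for "martha" vs "marhta" there are 6 matching characters and
# 1 transposition, so jaro = (6/6 + 6/6 + 5/6) / 3 ≈ 0.9444; a common prefix of
# length 3 gives 0.9444 + 3 * 0.1 * (1 - 0.9444) ≈ 0.9611.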
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world''')) | 46 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def make_linear_from_emb( emb ):
    # Build a vocab-projection layer that shares the embedding weights.
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path ,hf_config_path="facebook/mbart-large-en-ro" ,finetuned=False ,mbart_aa=False ):
    state_dict = torch.load(checkpoint_path ,map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path ,vocab_size=vocab_size )
    if mbart_aa and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
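# Note: for fine-tuned checkpoints the LM head is re-tied to the shared
# embedding via make_linear_from_emb above, mirroring fairseq's tied output
# projection.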
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 0 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file( tokenizer_name ,data_dir ,max_source_length=1_0_2_4 ,max_target_length=1_0_2_4 ,consider_target=False ,**kwargs ):
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok ,data_dir ,max_source_length ,max_target_length ,type_path='train' ,**kwargs )
    pad = tok.pad_token_id
    def get_lens( ds ):
        dl = tqdm(
            DataLoader(ds ,batch_size=5_1_2 ,num_workers=8 ,shuffle=False ,collate_fn=ds.collate_fn ) ,desc=str(ds.len_file ) ,)
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['labels'].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens ,tgt_lens ):
                    max_lens.append(max(src ,tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok ,data_dir ,max_source_length ,max_target_length ,type_path='val' ,**kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens ,train_ds.len_file )
    pickle_save(val_lens ,val_ds.len_file )
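# The saved per-example token lengths feed length-aware batch samplers (e.g.
# the sortish sampler), which pack sequences of similar length into the same
# batch to reduce padding.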
if __name__ == "__main__":
fire.Fire(save_len_file)
| 47 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nllb_moe"""] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
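# _LazyModule defers the heavy imports declared above until an attribute is
# first accessed, keeping the package import cheap while the TYPE_CHECKING
# branch still exposes the full surface to type checkers.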
| 29 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class A ( SCREAMING_SNAKE_CASE__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
snake_case__ :str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
snake_case__ :ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} )
snake_case__ :ClassVar[Features] = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
snake_case__ :str = "question"
snake_case__ :str = "context"
snake_case__ :str = "answers"
@property
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
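# Hypothetical usage sketch (upstream this template is datasets'
# QuestionAnsweringExtractive; prepare_for_task renames/casts columns to the
# canonical question/context/answers schema):
# squad = load_dataset("squad", split="train")
# squad = squad.prepare_for_task(A())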
| 48 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
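        # These pin the "fixed_small" posterior variance
        # beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
        # of the default linear beta schedule at t = 0, 487 and 999.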
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
| 29 | 0 |
"""simple docstring"""
def all_characters_unique( input_str: str ):
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
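# Bitmap mechanics: for "ab", ord("a") == 97 sets bit 97 and ord("b") == 98
# sets bit 98; a second "a" finds bit 97 already on and the function returns
# False. A set-based equivalent, shown for comparison only:
# def all_characters_unique_set(input_str: str) -> bool:
#     return len(set(input_str)) == len(input_str)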
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = v.to_dict()
return d
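        # The loop above converts nested GenerationConfig values to plain
        # dicts so the arguments remain JSON-serializable when logged or saved.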
| 29 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCamelCase : Any = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'blip_text_model'
def __init__( self ,_lowerCAmelCase=3_05_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=8 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=3_05_22 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0 ,_lowerCAmelCase=1_02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
super().__init__(
pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,sep_token_id=_lowerCAmelCase ,**_lowerCAmelCase ,)
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = encoder_hidden_size
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = hidden_act
lowerCamelCase__ = initializer_range
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = is_decoder
lowerCamelCase__ = use_cache
@classmethod
    def UpperCamelCase_ ( cls ,pretrained_model_name_or_path ,**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the text config dict if we are loading from BlipConfig
        if config_dict.get("""model_type""" ) == "blip":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'blip_vision_model'
def __init__( self ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=3_84 ,_lowerCAmelCase=16 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-10 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = patch_size
lowerCamelCase__ = image_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = hidden_act
@classmethod
    def UpperCamelCase_ ( cls ,pretrained_model_name_or_path ,**kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from BlipConfig
        if config_dict.get("""model_type""" ) == "blip":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict ,**kwargs )
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'blip'
_UpperCamelCase = True
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=2.6592 ,_lowerCAmelCase=2_56 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
if text_config is None:
lowerCamelCase__ = {}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase__ = {}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" )
lowerCamelCase__ = BlipTextConfig(**_lowerCAmelCase )
lowerCamelCase__ = BlipVisionConfig(**_lowerCAmelCase )
lowerCamelCase__ = self.vision_config.hidden_size
lowerCamelCase__ = projection_dim
lowerCamelCase__ = logit_scale_init_value
lowerCamelCase__ = 1.0
lowerCamelCase__ = 0.02
lowerCamelCase__ = image_text_hidden_size
@classmethod
    def UpperCamelCase_ ( cls ,text_config ,vision_config ,**kwargs ):
        return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**kwargs )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
lowerCamelCase__ = self.text_config.to_dict()
lowerCamelCase__ = self.vision_config.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
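# Hypothetical composition sketch (upstream these classes are BlipTextConfig,
# BlipVisionConfig and BlipConfig; the classmethod above corresponds to
# BlipConfig.from_text_vision_configs):
# cfg = BlipConfig.from_text_vision_configs(text_config, vision_config)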
| 50 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
    )
default_cache_path = os.path.join(torch_cache_home, """transformers""")
CLOUDFRONT_DISTRIB_PREFIX = """https://cdn.huggingface.co"""
S3_BUCKET_PREFIX = """https://s3.amazonaws.com/models.huggingface.co/bert"""
PATH = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
CONFIG = os.path.join(PATH, """config.yaml""")
ATTRIBUTES = os.path.join(PATH, """attributes.txt""")
OBJECTS = os.path.join(PATH, """objects.txt""")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = """pytorch_model.bin"""
CONFIG_NAME = """config.yaml"""
def load_labels( objs=OBJECTS ,attrs=ATTRIBUTES ):
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(''',''' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(''',''' )[0].lower().strip() )
    return vg_classes, vg_attrs
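# The objects/attributes label files are CSV-like with one entry per line;
# only the first comma-separated field is kept, lowercased and stripped,
# e.g. "person,human being" -> "person".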
def load_checkpoint( ckp_path ):
    r = OrderedDict()
    with open(ckp_path ,'''rb''' ) as f:
        ckp = pkl.load(f )['''model''']
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v ,np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v ,torch.Tensor ), type(v )
        r[k] = v
    return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def compare( in_tensor ):
    out_tensor = torch.load('''dump.pt''' ,map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape ,na[0, 0, :5] )
    print(nb.shape ,nb[0, 0, :5] )
    assert np.allclose(na ,nb ,rtol=0.01 ,atol=0.1 ), (
        f"{sum([1 for x in np.isclose(na ,nb ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception('''tensors are all good''' )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id ,filename ,use_cdn=True ):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '''/''' not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get( url ,temp_file ,proxies=None ,resume_size=0 ,user_agent=None ,):
    ua = '''python/{}'''.format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent ,dict ):
        ua += "; " + "; ".join('''{}/{}'''.format(k ,v ) for k, v in user_agent.items() )
    elif isinstance(user_agent ,str ):
        ua += "; " + user_agent
    headers = {'''user-agent''': ua}
    if resume_size > 0:
        headers['''Range'''] = '''bytes=%d-''' % (resume_size,)
    response = requests.get(url ,stream=True ,proxies=proxies ,headers=headers )
    if response.status_code == 416: # Range not satisfiable
        return
    content_length = response.headers.get('''Content-Length''' )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit='''B''' ,unit_scale=True ,total=total ,initial=resume_size ,desc='''Downloading''' ,)
    for chunk in response.iter_content(chunk_size=1_024 ):
        if chunk: # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def url_to_filename( url ,etag=None ):
    url_bytes = url.encode('''utf-8''' )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('''utf-8''' )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith('''.h5''' ):
        filename += ".h5"
    return filename
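# Cache filename scheme: sha256(url) hex digest, plus "." + sha256(etag) when
# an ETag is available, so a changed upstream file maps to a new cache entry
# instead of overwriting the old one.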
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
lowerCamelCase_ = requests.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def get_image_from_url( url ):
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def load_frcnn_pkl_from_url( url ):
    fn = url.split('''/''' )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn ,'''rb''' ) as stream:
        weights = pkl.load(stream )
    model = weights.pop('''model''' )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k_new = k.replace('''running_var''' ,'''num_batches_tracked''' )
            new[k_new] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def chunk( images ,batch=1 ):
    return (images[i : i + batch] for i in range(0 ,len(images ) ,batch ))
| 29 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __snake_case ( self : int ):
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = BlipImageProcessor()
UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
UpperCAmelCase = BlipProcessor(a__ , a__ )
processor.save_pretrained(self.tmpdirname )
def __snake_case ( self : Dict , **a__ : Dict ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).tokenizer
def __snake_case ( self : Dict , **a__ : List[str] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : Tuple ):
UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : List[str] ):
UpperCAmelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCAmelCase = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
UpperCAmelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = image_processor(a__ , return_tensors='''np''' )
UpperCAmelCase = processor(images=a__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self : Any ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = processor(text=a__ )
UpperCAmelCase = tokenizer(a__ , return_token_type_ids=a__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self : str ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(a__ ):
processor()
def __snake_case ( self : List[str] ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase = processor.batch_decode(a__ )
UpperCAmelCase = tokenizer.batch_decode(a__ )
self.assertListEqual(a__ , a__ )
def __snake_case ( self : int ):
UpperCAmelCase = self.get_image_processor()
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = BlipProcessor(tokenizer=a__ , image_processor=a__ )
UpperCAmelCase = '''lower newer'''
UpperCAmelCase = self.prepare_image_inputs()
UpperCAmelCase = processor(text=a__ , images=a__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
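# Editor's note (added) on the pattern under test: a Processor composes a
# tokenizer with an image processor and merges their outputs, so
# processor(text=..., images=...) returns the union of the text keys
# (input_ids, attention_mask) and the vision key (pixel_values), while
# batch_decode simply forwards to the tokenizer.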
| 51 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
  # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
    # Removing the `label` column because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
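# Editor's sketch (added; hypothetical mini input): the '#'-delimited table
# format parsed by _convert_table_text_to_pandas above -- the first row holds
# the column names and the remaining rows the records.
import pandas as pd
table_text = 'city#population\nparis#2m\nberlin#3.7m'
rows = [row.split('#') for row in table_text.strip('\n').split('\n')]
df = pd.DataFrame.from_records(rows[1:], columns=rows[0])  # 2 rows x 2 columns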
| 29 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import choice
def __A ( a_ :Tuple) -> List[str]:
return choice(a_)
def __A ( a_ :list[int] , a_ :int) -> int:
__a : Optional[int] = random_pivot(a_)
# partition based on pivot
# linear time
__a : Union[str, Any] = [e for e in lst if e < pivot]
__a : Any = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(a_) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(a_) < k - 1:
return kth_number(a_ , k - len(a_) - 1)
# pivot is in elements smaller than k
else:
return kth_number(a_ , a_)
if __name__ == "__main__":
import doctest
doctest.testmod() | 52 |
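# Editor's note (added) on the quickselect above: it returns the k-th smallest
# element (1-indexed) in expected linear time by partitioning around a random
# pivot and recursing only into the side that must contain position k,
# shrinking k by the number of discarded smaller elements.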
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
lowerCamelCase_ = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
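# Editor's note (added) on the score above: optax averages the per-token
# cross-entropy, so multiplying the mean back by the sequence length and
# negating recovers the total sequence log-likelihood that EXPECTED_SCORE was
# computed from.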
| 29 | 0 |
# Algorithm for the pigeonhole sorting
def a_ ( lowerCAmelCase_ : Optional[Any] ):
__lowerCAmelCase = min(lowerCAmelCase_ ) # min() finds the minimum value
__lowerCAmelCase = max(lowerCAmelCase_ ) # max() finds the maximum value
__lowerCAmelCase = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
__lowerCAmelCase = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(lowerCAmelCase_, lowerCAmelCase_ ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
__lowerCAmelCase = 0
for count in range(lowerCAmelCase_ ):
while holes[count] > 0:
holes[count] -= 1
__lowerCAmelCase = count + min_val
i += 1
def a_ ( ):
__lowerCAmelCase = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(lowerCAmelCase_ )
print('Sorted order is:', ' '.join(lowerCAmelCase_ ) )
if __name__ == "__main__":
main()
| 53 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
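# Editor's note (added) on the optimized builder above: it exploits row
# symmetry, computing only the first ceil(n / 2) entries of each row and
# mirroring them, e.g. the half [1, 5, 10] of the sixth row reflects into
# [1, 5, 10, 10, 5, 1].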
| 29 | 0 |
import operator
def a__ ( lowercase__ , lowercase__ = False , lowercase__ = None ):
'''simple docstring'''
UpperCAmelCase_ =operator.lt if reverse else operator.gt
UpperCAmelCase_ =solution or []
if not arr:
return solution
UpperCAmelCase_ =[arr.pop(0 )]
for i, item in enumerate(lowercase__ ):
if _operator(lowercase__ , sublist[-1] ):
sublist.append(lowercase__ )
arr.pop(lowercase__ )
# merging sublist into solution list
if not solution:
solution.extend(lowercase__ )
else:
while sublist:
UpperCAmelCase_ =sublist.pop(0 )
for i, xx in enumerate(lowercase__ ):
if not _operator(lowercase__ , lowercase__ ):
solution.insert(lowercase__ , lowercase__ )
break
else:
solution.append(lowercase__ )
strand_sort(lowercase__ , lowercase__ , lowercase__ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
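# Editor's note (added) on strand sort above: each pass greedily peels an
# increasing "strand" from the input (for [4, 3, 5, 1, 2] the first strand is
# [4, 5]), merge-inserts it into the running solution, and recurses on the
# remainder until the input is empty.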
| 54 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE :str = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Any = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
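# Editor's sketch (added): a simplified stand-in for the lazy-import pattern
# used above, not transformers' actual _LazyModule -- attribute access triggers
# the real submodule import on first use instead of at package import time.
import importlib
from types import ModuleType
class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        module = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)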
| 55 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCamelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,idalabel=lowerCAmelCase__ ,labelaid=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=lowerCAmelCase__ ,image_std=lowerCAmelCase__ ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
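# Editor's sketch (added; hypothetical one-key example) of the conversion loop
# above: each checkpoint key is popped, passed through the rename rules, and
# reinserted under its Hugging Face name.
state_dict = {'patch_embed.proj.weight': 0}
for key in list(state_dict):
    renamed = 'focalnet.' + key.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    state_dict[renamed] = state_dict.pop(key)
# -> {'focalnet.embeddings.patch_embeddings.projection.weight': 0}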
| 29 | 0 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_a : Optional[int] = "scheduler_config.json"
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : List[str] = 1
_SCREAMING_SNAKE_CASE : Any = 2
_SCREAMING_SNAKE_CASE : int = 3
_SCREAMING_SNAKE_CASE : str = 4
_SCREAMING_SNAKE_CASE : int = 5
@dataclass
class _lowercase ( __lowercase ):
_SCREAMING_SNAKE_CASE : jnp.ndarray
class _lowercase :
_SCREAMING_SNAKE_CASE : Optional[Any] = SCHEDULER_CONFIG_NAME
_SCREAMING_SNAKE_CASE : Any = ["dtype"]
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : Any = True
@classmethod
def a ( cls : List[Any] , SCREAMING_SNAKE_CASE_ : Dict[str, Any] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : List[Any]=False , **SCREAMING_SNAKE_CASE_ : Dict , ) -> Dict:
__snake_case , __snake_case = cls.load_config(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE_ , subfolder=SCREAMING_SNAKE_CASE_ , return_unused_kwargs=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
__snake_case , __snake_case = cls.from_config(SCREAMING_SNAKE_CASE_ , return_unused_kwargs=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if hasattr(SCREAMING_SNAKE_CASE_ , 'create_state' ) and getattr(SCREAMING_SNAKE_CASE_ , 'has_state' , SCREAMING_SNAKE_CASE_ ):
__snake_case = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def a ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
self.save_config(save_directory=SCREAMING_SNAKE_CASE_ , push_to_hub=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def a ( self : Optional[Any] ) -> Optional[int]:
return self._get_compatibles()
@classmethod
def a ( cls : Any ) -> int:
__snake_case = list(set([cls.__name__] + cls._compatibles ) )
__snake_case = importlib.import_module(__name__.split('.' )[0] )
__snake_case = [
getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for c in compatible_classes_str if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
return compatible_classes
def _a (lowercase__ : jnp.ndarray , lowercase__ : Tuple[int] ) -> jnp.ndarray:
"""simple docstring"""
assert len(lowercase__ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(lowercase__ ) - x.ndim) ) , lowercase__ )
def _a (lowercase__ : int , lowercase__ : Optional[Any]=0.9_99 , lowercase__ : Dict=jnp.floataa ) -> jnp.ndarray:
"""simple docstring"""
def alpha_bar(lowercase__ : Union[str, Any] ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
__snake_case = []
for i in range(lowercase__ ):
__snake_case = i / num_diffusion_timesteps
__snake_case = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(lowercase__ ) / alpha_bar(lowercase__ ) , lowercase__ ) )
return jnp.array(lowercase__ , dtype=lowercase__ )
@flax.struct.dataclass
class _lowercase :
_SCREAMING_SNAKE_CASE : jnp.ndarray
_SCREAMING_SNAKE_CASE : jnp.ndarray
_SCREAMING_SNAKE_CASE : jnp.ndarray
@classmethod
def a ( cls : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Dict:
__snake_case = scheduler.config
if config.trained_betas is not None:
__snake_case = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__snake_case = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
__snake_case = 1.0 - betas
__snake_case = jnp.cumprod(SCREAMING_SNAKE_CASE_ , axis=0 )
return cls(
alphas=SCREAMING_SNAKE_CASE_ , betas=SCREAMING_SNAKE_CASE_ , alphas_cumprod=SCREAMING_SNAKE_CASE_ , )
def _a (lowercase__ : CommonSchedulerState , lowercase__ : jnp.ndarray , lowercase__ : jnp.ndarray , lowercase__ : jnp.ndarray ) -> Optional[int]:
"""simple docstring"""
__snake_case = state.alphas_cumprod
__snake_case = alphas_cumprod[timesteps] ** 0.5
__snake_case = sqrt_alpha_prod.flatten()
__snake_case = broadcast_to_shape_from_left(lowercase__ , original_samples.shape )
__snake_case = (1 - alphas_cumprod[timesteps]) ** 0.5
__snake_case = sqrt_one_minus_alpha_prod.flatten()
__snake_case = broadcast_to_shape_from_left(lowercase__ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _a (lowercase__ : CommonSchedulerState , lowercase__ : jnp.ndarray , lowercase__ : jnp.ndarray , lowercase__ : jnp.ndarray ) -> int:
"""simple docstring"""
__snake_case , __snake_case = get_sqrt_alpha_prod(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__snake_case = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def _a (lowercase__ : CommonSchedulerState , lowercase__ : jnp.ndarray , lowercase__ : jnp.ndarray , lowercase__ : jnp.ndarray ) -> List[Any]:
"""simple docstring"""
__snake_case , __snake_case = get_sqrt_alpha_prod(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__snake_case = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
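# Editor's note (added): the two helpers above implement the standard diffusion
# forward process and its v-prediction target,
#   noisy = sqrt(abar_t) * x0 + sqrt(1 - abar_t) * eps
#   velocity = sqrt(abar_t) * eps - sqrt(1 - abar_t) * x0
# with abar_t = alphas_cumprod[t] broadcast to the sample shape.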
| 56 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
    def test_sequence_builders(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode('''你好''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''你是谁''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                string_sequence = '''你好,你是谁'''
                tokens = tokenizer.tokenize(string_sequence )
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens )
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids , tokens_shape_ids , tokens_proun_ids , add_special_tokens=True )
                input_dict = tokenizer.encode_plus(string_sequence , add_special_tokens=True )
                self.assertEqual(input_dict , prepared_input_dict )
| 29 | 0 |
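The tests above exercise RoCBert's distinctive input format: each token is represented by three aligned id sequences (word id, glyph/shape id, and pronunciation id). The toy sketch below, with entirely made-up vocabularies, only illustrates how the three channels stay index-aligned; the real mappings come from the tokenizer's vocab, shape, and pronunciation files.

# Toy illustration (hypothetical vocabularies) of RoCBert's three aligned
# input channels: one id per token from each of the word/shape/pronunciation maps.
tokens = ["你", "好"]
word_vocab = {"你": 5, "好": 6}
shape_vocab = {"你": 21, "好": 22}
pron_vocab = {"你": 87, "好": 88}
input_ids = [word_vocab[t] for t in tokens]
shape_ids = [shape_vocab[t] for t in tokens]
pron_ids = [pron_vocab[t] for t in tokens]
assert len(input_ids) == len(shape_ids) == len(pron_ids) == len(tokens)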
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False , ):
super().__init__()
UpperCamelCase_: Optional[int] = nn.Embedding(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Optional[int] = nn.Embedding(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_: Optional[Any] = False
UpperCamelCase_: Optional[int] = nn.Dropout(p=_lowerCamelCase )
UpperCamelCase_: Dict = TaConfig(
vocab_size=_lowerCamelCase , d_model=_lowerCamelCase , num_heads=_lowerCamelCase , d_kv=_lowerCamelCase , d_ff=_lowerCamelCase , dropout_rate=_lowerCamelCase , feed_forward_proj=_lowerCamelCase , is_decoder=_lowerCamelCase , is_encoder_decoder=_lowerCamelCase , )
UpperCamelCase_: List[str] = nn.ModuleList()
for lyr_num in range(_lowerCamelCase ):
UpperCamelCase_: Tuple = TaBlock(_lowerCamelCase )
self.encoders.append(_lowerCamelCase )
UpperCamelCase_: Tuple = TaLayerNorm(_lowerCamelCase )
UpperCamelCase_: Dict = nn.Dropout(p=_lowerCamelCase )
def _a ( self , _lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_: List[Any] = self.token_embedder(_lowerCamelCase )
UpperCamelCase_: Tuple = encoder_input_tokens.shape[1]
UpperCamelCase_: Optional[Any] = torch.arange(_lowerCamelCase , device=encoder_input_tokens.device )
x += self.position_encoding(_lowerCamelCase )
UpperCamelCase_: Optional[Any] = self.dropout_pre(_lowerCamelCase )
        # build the extended attention mask (padding positions become large negative biases)
UpperCamelCase_: Dict = encoder_input_tokens.size()
UpperCamelCase_: str = self.get_extended_attention_mask(_lowerCamelCase , _lowerCamelCase )
for lyr in self.encoders:
UpperCamelCase_: Optional[Any] = lyr(_lowerCamelCase , _lowerCamelCase )[0]
UpperCamelCase_: Union[str, Any] = self.layer_norm(_lowerCamelCase )
return self.dropout_post(_lowerCamelCase ), encoder_inputs_mask | 57 |
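The encoder above leans on `ModuleUtilsMixin.get_extended_attention_mask` to turn a 0/1 padding mask into an additive bias on the attention logits. A minimal, self-contained sketch of that transformation (assumed behaviour: 1 = keep, 0 = mask out), not the transformers implementation itself:

import torch

# A 0/1 padding mask of shape (batch, seq_len) is broadcast to
# (batch, 1, 1, seq_len) and inverted, so masked positions carry a large
# negative bias when added to the attention scores.
attention_mask = torch.tensor([[1, 1, 1, 0, 0]])
extended = attention_mask[:, None, None, :].float()
extended = (1.0 - extended) * torch.finfo(torch.float32).min
print(extended)  # 0.0 for kept tokens, ~-3.4e38 for masked ones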
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank that is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
The CoNLL-file parsing was developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively" )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}")
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(
        self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references )
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
        return score
| 29 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 |
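The init file above follows the transformers lazy-import pattern: the module only lists what it exports in `_import_structure`, and `_LazyModule` defers the heavy torch imports until an attribute is first accessed. A stripped-down sketch of the same idea using module-level `__getattr__` (PEP 562); the names below are illustrative, not transformers internals:

# Minimal sketch of lazy attribute resolution (PEP 562): the submodule is
# only imported the first time one of its exported names is looked up.
import importlib

_import_structure = {"modeling_ernie": ["ErnieModel"]}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    if name in _name_to_module:
        module = importlib.import_module("." + _name_to_module[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")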
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
        model_config = AutoConfig.from_pretrained('''gpt2''' )
        generation_config_from_model = GenerationConfig.from_model_config(model_config )
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config )
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
| 29 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
__A = get_tests_dir("fixtures")
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Tuple:
'''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # This check ensures we actually called the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE_ (self : str) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase_):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__: int =AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
lowerCamelCase__: Any =AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor")
self.assertIsNotNone(UpperCAmelCase_)
@is_staging_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Dict) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: str =TOKEN
HfFolder.save_token(UpperCAmelCase_)
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[Any]) ->Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-image-processor")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor")
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Tuple =ViTImageProcessor.from_pretrained(UpperCAmelCase_)
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token)
lowerCamelCase__: str =ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCAmelCase_ , repo_id="test-image-processor" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token)
lowerCamelCase__: int =ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Any =ViTImageProcessor.from_pretrained(UpperCAmelCase_)
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token)
lowerCamelCase__: int =ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
UpperCAmelCase_ , repo_id="valid_org/test-image-processor-org" , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token)
lowerCamelCase__: Tuple =ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
for k, v in image_processor.__dict__.items():
self.assertEqual(UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
lowerCamelCase__: Any =CustomImageProcessor.from_pretrained(UpperCAmelCase_)
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
lowerCamelCase__: str =AutoImageProcessor.from_pretrained(
F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=UpperCAmelCase_)
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor")
| 59 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
    def flatten(self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value
        return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
    def __post_init__(self ):
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
    def encode_example(self , translation_dict ):
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages , translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}
    def flatten(self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value
        return {
            "language": Sequence(Value('''string''' ) ),
            "translation": Sequence(Value('''string''' ) ),
        }
| 29 | 0 |
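The encode_example method above flattens a `{lang: text-or-list}` dict into two aligned, language-sorted lists. A standalone sketch of that flattening, runnable without the datasets library:

# Standalone sketch of the flattening performed by encode_example above:
# multi-reference entries are expanded, then pairs are sorted by language code.
def flatten_translations(translation_dict):
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    languages, translations = zip(*sorted(pairs))
    return {"language": list(languages), "translation": list(translations)}

print(flatten_translations({"fr": "bonjour", "en": ["hello", "hi"]}))
# -> {'language': ['en', 'en', 'fr'], 'translation': ['hello', 'hi', 'bonjour']}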
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : List[Any] = GPTSwaTokenizer(__magic_name__ , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase (self , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = '''This is a test'''
snake_case_ : Tuple = '''This is a test'''
return input_text, output_text
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : int = '''<s>'''
snake_case_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(__magic_name__ ) , 2000 )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = GPTSwaTokenizer(__magic_name__ )
snake_case_ : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__magic_name__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [465, 287, 265, 631, 842] )
snake_case_ : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
# fmt: off
self.assertListEqual(
__magic_name__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
snake_case_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__magic_name__ )
# fmt: off
self.assertListEqual(
__magic_name__ , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] )
# fmt: on
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = GPTSwaTokenizer(__magic_name__ )
snake_case_ : Tuple = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
snake_case_ : str = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__magic_name__ , __magic_name__ ):
self.assertListEqual(tokenizer.encode_fast(__magic_name__ ) , __magic_name__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(__magic_name__ , __magic_name__ ):
self.assertEqual(tokenizer.decode_fast(__magic_name__ ) , __magic_name__ )
@slow
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : int = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
snake_case_ : Optional[Any] = {'''input_ids''': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__magic_name__ , )
| 60 |
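The `<0xC3>` and `<0xA9>` pieces expected in the tests above are SentencePiece byte-fallback tokens: 'é' has no piece of its own in the small fixture vocabulary, so the tokenizer falls back to its raw UTF-8 bytes. A quick check of that byte sequence:

# 'é' encodes to the UTF-8 bytes 0xC3 0xA9, which is exactly what the
# byte-fallback tokens <0xC3> <0xA9> in the expected output represent.
assert list("é".encode("utf-8")) == [0xC3, 0xA9]
print([f"<0x{b:02X}>" for b in "é".encode("utf-8")])  # ['<0xC3>', '<0xA9>']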
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def _A ( lowerCAmelCase_ : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = SwinConfig()
lowerCAmelCase__ = swin_name.split("_" )
lowerCAmelCase__ = name_split[1]
lowerCAmelCase__ = int(name_split[4] )
lowerCAmelCase__ = int(name_split[3][-1] )
if model_size == "tiny":
lowerCAmelCase__ = 96
lowerCAmelCase__ = (2, 2, 6, 2)
lowerCAmelCase__ = (3, 6, 12, 24)
elif model_size == "small":
lowerCAmelCase__ = 96
lowerCAmelCase__ = (2, 2, 18, 2)
lowerCAmelCase__ = (3, 6, 12, 24)
elif model_size == "base":
lowerCAmelCase__ = 128
lowerCAmelCase__ = (2, 2, 18, 2)
lowerCAmelCase__ = (4, 8, 16, 32)
else:
lowerCAmelCase__ = 192
lowerCAmelCase__ = (2, 2, 18, 2)
lowerCAmelCase__ = (6, 12, 24, 48)
if "in22k" in swin_name:
lowerCAmelCase__ = 2_1841
else:
lowerCAmelCase__ = 1000
lowerCAmelCase__ = "huggingface/label-files"
lowerCAmelCase__ = "imagenet-1k-id2label.json"
lowerCAmelCase__ = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="dataset" ) , "r" ) )
lowerCAmelCase__ = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = img_size
lowerCAmelCase__ = num_classes
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = depths
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = window_size
return config
def _A ( lowerCAmelCase_ : int ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowerCAmelCase__ = "encoder." + name
if "attn.proj" in name:
lowerCAmelCase__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCAmelCase__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCAmelCase__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCAmelCase__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCAmelCase__ = name.replace("mlp.fc2" , "output.dense" )
if name == "norm.weight":
lowerCAmelCase__ = "layernorm.weight"
if name == "norm.bias":
lowerCAmelCase__ = "layernorm.bias"
if "head" in name:
lowerCAmelCase__ = name.replace("head" , "classifier" )
else:
lowerCAmelCase__ = "swin." + name
return name
def _A ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(lowerCAmelCase_ )
if "mask" in key:
continue
elif "qkv" in key:
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[1] )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[
:dim
]
lowerCAmelCase__ = val[
dim : dim * 2
]
lowerCAmelCase__ = val[
-dim:
]
else:
lowerCAmelCase__ = val
return orig_state_dict
def _A ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = timm.create_model(lowerCAmelCase_ , pretrained=lowerCAmelCase_ )
timm_model.eval()
lowerCAmelCase__ = get_swin_config(lowerCAmelCase_ )
lowerCAmelCase__ = SwinForImageClassification(lowerCAmelCase_ )
model.eval()
lowerCAmelCase__ = convert_state_dict(timm_model.state_dict() , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
lowerCAmelCase__ = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
lowerCAmelCase__ = image_processor(images=lowerCAmelCase_ , return_tensors="pt" )
lowerCAmelCase__ = timm_model(inputs["pixel_values"] )
lowerCAmelCase__ = model(**lowerCAmelCase_ ).logits
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase_ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 61 |
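The conversion above splits timm's fused `qkv` projection into separate query/key/value tensors by slicing along the first axis, as done in convert_state_dict. A minimal sketch of that slicing with a toy dimension:

import torch

# Toy illustration of splitting a fused qkv weight of shape (3*dim, dim)
# into query/key/value, mirroring the slicing in convert_state_dict above.
dim = 4
qkv_weight = torch.randn(3 * dim, dim)
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([query, key, value]), qkv_weight)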
"""simple docstring"""
import math
def prime_sieve(n: int) -> list:
    """Return 2 and all odd primes below n, via an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """Sum the matching numbers up to limit by walking consecutive prime squares."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 29 | 0 |
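The lps/ups bookkeeping above matches the shape of Project Euler problem 234 ("semidivisible numbers", whose limit is also 999,966,663,333): lps(n) is the largest prime not exceeding sqrt(n), ups(n) the smallest prime not less than sqrt(n), and n counts when exactly one of them divides n. A hypothetical brute-force cross-check of that definition for small n, reusing the prime_sieve helper from above:

import math

# Brute-force cross-check (assumed semantics): n is counted when exactly one
# of the two primes bracketing sqrt(n) divides n.
def is_semidivisible(n: int, primes: list) -> bool:
    root = math.sqrt(n)
    lps = max(p for p in primes if p <= root)
    ups = min(p for p in primes if p >= root)
    return (n % lps == 0) != (n % ups == 0)

primes = prime_sieve(100)
print(sum(n for n in range(4, 1000) if is_semidivisible(n, primes)))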
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
SCREAMING_SNAKE_CASE : Any = Image.open(requests.get(lowercase , stream=lowercase ).raw ).convert("RGB" )
return image
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def lowerCamelCase__ ( lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = dct.pop(lowercase )
SCREAMING_SNAKE_CASE : Union[str, Any] = val
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
SCREAMING_SNAKE_CASE : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat((q_bias, torch.zeros_like(lowercase , requires_grad=lowercase ), v_bias) )
SCREAMING_SNAKE_CASE : Dict = qkv_bias
def lowerCamelCase__ ( lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = 364 if "coco" in model_name else 224
SCREAMING_SNAKE_CASE : Union[str, Any] = BlipaVisionConfig(image_size=lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
SCREAMING_SNAKE_CASE : Dict = OPTConfig.from_pretrained("facebook/opt-2.7b" , eos_token_id=lowercase ).to_dict()
elif "opt-6.7b" in model_name:
SCREAMING_SNAKE_CASE : int = OPTConfig.from_pretrained("facebook/opt-6.7b" , eos_token_id=lowercase ).to_dict()
elif "t5-xl" in model_name:
SCREAMING_SNAKE_CASE : Optional[Any] = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
SCREAMING_SNAKE_CASE : Optional[int] = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict()
SCREAMING_SNAKE_CASE : Tuple = BlipaConfig(vision_config=lowercase , text_config=lowercase )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    """simple docstring"""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
    )
    eos_token_id = tokenizer("\n" , add_special_tokens=False ).input_ids[0]
    config, image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("Qformer.bert" ):
            key = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            key = key.replace("self" , "attention" )
        if "opt_proj" in key:
            key = key.replace("opt_proj" , "language_projection" )
        if "t5_proj" in key:
            key = key.replace("t5_proj" , "language_projection" )
        if key.startswith("opt" ):
            key = key.replace("opt" , "language" )
        if key.startswith("t5" ):
            key = key.replace("t5" , "language" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors="pt" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1E-2 )
    print("Looks ok!" )
    print("Generating a caption..." )
    prompt = ""
    input_ids = tokenizer(prompt , return_tensors="pt" ).input_ids.to(device )
    original_outputs = original_model.generate({"image": original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("Original generation:" , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print("HF generation:" , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
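    # A hypothetical invocation sketch (the script filename is an assumption;
    # the model name and output path below are placeholders):
    #     python convert_blip_2_original_to_pytorch.py \
    #         --model_name blip2-opt-2.7b \
    #         --pytorch_dump_folder_path ./blip2-opt-2.7b-converted \
    #         --push_to_hub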
| 62 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close( source ,target ):
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def test_test_command( dataset_loading_script_dir ):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir ,all_configs=True ,save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir ,'''README.md''' )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['''default'''] ,key ), getattr(expected_dataset_infos['''default'''] ,key )
        if key == "num_bytes":
            assert is_apercent_close(result ,expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
        else:
            assert result == expected
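def test_is_apercent_close_sketch():
    # Illustrative sketch (hypothetical byte counts, not part of the original
    # suite) of the 1%-tolerance helper used above.
    assert is_apercent_close(2_360_000 ,2_351_563 )  # ~0.4% apart -> close
    assert not is_apercent_close(2_500_000 ,2_351_563 )  # ~6% apart -> not close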
| 29 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path: str , albert_config_file: str , pytorch_dump_path: str ):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
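    # A hypothetical invocation sketch (all paths below are placeholders):
    #     python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #         --tf_checkpoint_path ./albert_base/model.ckpt-best \
    #         --albert_config_file ./albert_base/albert_config.json \
    #         --pytorch_dump_path ./albert_base_pytorch_model.bin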
| 63 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of the recognition errors, so further work is required to identify the main source(s) of error and to focus any research effort.
This problem is addressed by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. A related empirical observation, often described as a power law, is that a language model's perplexity and the word error rate are correlated.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
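# A minimal, self-contained sketch (assumption: executed as a standalone
# script with jiwer installed, outside the datasets.Metric machinery) of the
# iterative branch above: accumulate per-pair error and reference-word counts
# from jiwer's measures, then take their ratio.
if __name__ == "__main__":
    preds = ["this is the prediction", "there is an other sample"]
    refs = ["this is the reference", "there is another one"]
    incorrect, total = 0, 0
    for pred, ref in zip(preds , refs ):
        measures = compute_measures(ref , pred )
        # errors: words substituted, deleted or inserted relative to the reference
        incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
        # reference length N = S + D + C ("hits" are the correct words C)
        total += measures["substitutions"] + measures["deletions"] + measures["hits"]
    print(incorrect / total )  # 0.5 for these pairs, matching the docstring example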
| 29 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'''height''': 18, '''width''': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
def UpperCamelCase_ ( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = DonutImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: List[str]= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_thumbnail''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_pad''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
SCREAMING_SNAKE_CASE__: int= self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def UpperCamelCase_ ( self ) -> List[str]:
pass
@is_flaky()
def UpperCamelCase_ ( self ) -> List[str]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__: List[str]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def UpperCamelCase_ ( self ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__: Dict= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def UpperCamelCase_ ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Any= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__: Optional[int]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__: Any= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Tuple= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 64 |
"""simple docstring"""
def lowercase ( input_str: str ) -> str:
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly( poly: Sequence[float] , x: float ) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly: Sequence[float] , x: float ) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
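    # A quick equivalence sketch (hypothetical polynomial): 1 - 2x + 3x^2 at
    # x = 2 evaluates to 9.0 under either evaluation order, since Horner's rule
    # merely rewrites a_0 + a_1 x + ... + a_n x^n as (...(a_n x + a_{n-1}) x + ...) x + a_0.
    assert abs(evaluate_poly((1.0, -2.0, 3.0) , 2.0 ) - horner((1.0, -2.0, 3.0) , 2.0 )) < 1e-12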
| 65 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29 | 0 |
def __magic_name__ ( equation1: list[int] , equation2: list[int] ) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1 ) == len(equation2 ) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (the system is homogeneous and consistent)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
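# A usage sketch for the solver above (hypothetical 2x2 system), with each
# equation given as [a, b, c] for a*x + b*y = c:
#     2x + 3y = 7
#      x -  y = 1    ->  x = 2, y = 1
assert __magic_name__([2, 3, 7] , [1, -1, 1] ) == (2.0, 1.0)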
| 66 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowercase ( func ):
    def wrapper(*args ,**kwargs ):
        starttime = timeit.default_timer()
        _ = func(*args ,**kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples( features ,num_examples=100 ,seq_shapes=None ):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v ,_ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v ,datasets.Value ):
                if v.dtype == "string":
                    data = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    data = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
            elif isinstance(v ,datasets.Sequence ):
                while isinstance(v ,datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset( dataset_path ,features ,num_examples=100 ,seq_shapes=None ):
    dummy_data = generate_examples(features ,num_examples=num_examples ,seq_shapes=seq_shapes )
    with ArrowWriter(features=features ,path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
    dataset = datasets.Dataset.from_file(filename=dataset_path ,info=datasets.DatasetInfo(features=features ) )
    return dataset
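def _example_usage_sketch(tmp_path: str ) -> None:
    # Illustrative usage sketch (hypothetical feature spec and path) for the
    # benchmark helper above: build a tiny dataset with one string column and
    # check the row count round-trips.
    features = datasets.Features({"text": datasets.Value("string" )} )
    ds = generate_example_dataset(tmp_path ,features ,num_examples=10 )
    assert len(ds ) == 10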
| 29 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(v1: Vector , v2: Vector ) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(v1 ) - np.asarray(v2 )) ** 2 ) )
def euclidean_distance_no_np(v1: Vector , v2: Vector ) -> VectorOut:
    return sum((e1 - e2) ** 2 for e1, e2 in zip(v1 , v2 ) ) ** (1 / 2)
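# A tiny correctness sketch (hypothetical vectors, not part of the benchmark):
# both implementations agree on the 3-4-5 right triangle.
assert euclidean_distance((0, 0) , (3, 4) ) == 5.0
assert euclidean_distance_no_np((0, 0) , (3, 4) ) == 5.0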
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
benchmark() | 67 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 0 |
from __future__ import annotations
def is_9_pandigital(n: int ) -> bool:
    """simple docstring"""
    digits = str(n )
    return len(digits ) == 9 and set(digits ) == set("""123456789""" )
def solution() -> int | None:
    """simple docstring"""
    # For a base n in [5000, 9999], 2n has five digits, so n concatenated with
    # 2n equals n * 10**5 + 2 * n = 100002 * n; for a 3-digit base, n followed
    # by 2n and 3n equals n * 10**6 + 2n * 10**3 + 3n = 1002003 * n.
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def make_linear_from_emb( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
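def _tied_head_sketch() -> None:
    # Illustrative sketch (hypothetical sizes, not part of the conversion):
    # the linear head built above shares its weight storage with the embedding,
    # which is what ties the LM head to the token embeddings.
    emb = nn.Embedding(10 ,4 )
    head = make_linear_from_emb(emb )
    assert head.weight.data_ptr() == emb.weight.data_ptr()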
def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path ,hf_config_path="facebook/mbart-large-en-ro" ,finetuned=False ,mbart_aa=False ):
    state_dict = torch.load(checkpoint_path ,map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path ,vocab_size=vocab_size )
    if mbart_aa and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''allegro/herbert-base-cased''': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = HerbertTokenizer
def __init__( self : Optional[Any] , a_ : str=None , a_ : Dict=None , a_ : Any=None , a_ : Any="<s>" , a_ : List[str]="<unk>" , a_ : int="<pad>" , a_ : Dict="<mask>" , a_ : Optional[int]="</s>" , **a_ : Optional[int] , ):
"""simple docstring"""
super().__init__(
a_ , a_ , tokenizer_file=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , sep_token=a_ , **a_ , )
def A ( self : Optional[Any] , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A ( self : Tuple , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1]
def A ( self : Optional[int] , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : Union[str, Any] , a_ : str , a_ : Optional[str] = None ):
"""simple docstring"""
__snake_case = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ )
| 69 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nllb_moe"""] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
def valid_coloring( neighbours: list[int] , colored_vertices: list[int] , color: int ):
    '''simple docstring'''
    # A color is valid if no already-colored neighbour shares it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ):
    '''simple docstring'''
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph: list[list[int]] , max_colors: int ):
    '''simple docstring'''
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
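# A usage sketch on a hypothetical 5-vertex adjacency matrix (a triangle plus
# a two-vertex tail); the triangle forces at least three colors.
_graph = [
    [0, 1, 1, 0, 0],
    [1, 0, 1, 0, 0],
    [1, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 0, 0, 1, 0],
]
assert color(_graph , 3 ) != []  # e.g. [0, 1, 2, 0, 1]
assert color(_graph , 2 ) == []  # two colors cannot cover the triangle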
| 70 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
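def _custom_timesteps_sketch() -> None:
    # Illustrative sketch (not part of the suite): the custom-timesteps API
    # exercised above accepts a strictly descending list that ends at 0.
    scheduler = DDPMScheduler()
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0] )
    assert scheduler.timesteps.tolist() == [100, 87, 50, 1, 0]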
| 29 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__( self ,parent ,batch_size=3 ,image_size=32 ,num_channels=3 ,embeddings_size=10 ,hidden_sizes=[10, 20, 30, 40] ,depths=[1, 1, 2, 1] ,is_training=True ,use_labels=True ,hidden_act="relu" ,num_labels=3 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = self.get_config()
return config, pixel_values
def UpperCamelCase__ ( self ):
return RegNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : int = FlaxRegNetModel(config=_snake_case )
UpperCAmelCase_ : int = model(_snake_case )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : int = FlaxRegNetForImageClassification(config=_snake_case )
UpperCAmelCase_ : Optional[Any] = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = config_and_inputs
UpperCAmelCase_ : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin , unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=RegNetConfig ,has_text_modality=False )
def UpperCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ):
return
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_snake_case )
UpperCAmelCase_ : Tuple = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ):
UpperCAmelCase_ : str = model_class(_snake_case )
UpperCAmelCase_ : Any = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
UpperCAmelCase_ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) ,expected_num_stages + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : List[str] = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = model_class(_snake_case )
@jax.jit
def model_jitted(_snake_case ,**_snake_case ):
return model(pixel_values=_snake_case ,**_snake_case )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ : Tuple = model_jitted(**_snake_case ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ : Optional[int] = model_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) ,len(_snake_case ) )
for jitted_output, output in zip(_snake_case ,_snake_case ):
self.assertEqual(jitted_output.shape ,output.shape )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 71 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """
        Serializes this instance, replacing any nested `GenerationConfig` by a plain dict
        so the result stays JSON-serializable.
        """
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
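# Why `to_dict` is overridden above: a nested `GenerationConfig` instance is not
# JSON-serializable, so converting it to a plain dict keeps logging and serialization of
# the full argument set working. Minimal sketch (argument values are illustrative, not
# defaults):
#
#     args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True)
#     json.dumps(args.to_dict())  # succeeds even when `generation_config` is set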
| 29 | 0 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
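# A minimal sketch of the extraction behavior exercised above (paths are illustrative):
# with `extract_compressed_file=True`, `cached_path` returns the path of the *extracted*
# file inside the cache's "extracted" subdirectory rather than the archive itself.
#
#     download_config = DownloadConfig(cache_dir="/tmp/cache", extract_compressed_file=True)
#     extracted = cached_path("/data/file.txt.zst", download_config=download_config)
#     print(open(extracted).read())  # decompressed text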
| 72 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
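# Expected format of objects.txt / attributes.txt (an assumption based on the parsing
# above): one label per line, possibly with comma-separated aliases, of which only the
# first field is kept, e.g.:
#
#     person,people,human
#     tree,trees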
def load_ckp(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    # debugging tripwire: compares `in_tensor` against a dumped reference and raises either way
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
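# Resumption note: with `resume_size > 0` the Range header requests only the missing
# suffix of the file; an HTTP 416 ("Range Not Satisfiable") reply means the local copy
# is already complete, which is why the function returns early without writing anything.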
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
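# Example of the resulting cache-file name (hash values illustrative):
#
#     url_to_filename("https://cdn.huggingface.co/model.bin", etag='"abc123"')
#     -> "7c2e...f1a9.0b54...77de"   # sha256(url) + "." + sha256(etag)
#
# Hashing the ETag into the name means a changed remote file maps to a fresh cache entry.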
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
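# Usage sketch tying the image helpers together (URL illustrative):
#
#     img = img_tensorize("http://images.cocodataset.org/val2017/000000039769.jpg")
#     for batch in chunk([img], batch=1):
#         pass  # feed each batch to the detector / visual feature extractor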
| 29 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
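# Usage sketch (normally `Accelerator.prepare` builds this wrapper for you, and the
# optimizer it receives is the accelerate-wrapped one that exposes `step_was_skipped`;
# the names below are illustrative):
#
#     sched = torch.optim.lr_scheduler.StepLR(prepared_optimizer.optimizer, step_size=10)
#     scheduler = AcceleratedScheduler(sched, prepared_optimizer)
#     scheduler.step()  # no-op while gradients are still being accumulated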
| 73 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
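# Example invocation (script name, model id and paths are illustrative):
#
#     python run_tabfact_with_tapex.py \
#         --model_name_or_path microsoft/tapex-base \
#         --do_train --do_eval \
#         --output_dir ./tapex-tabfact-output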
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # each table is serialized as "#"-separated cells with "\n"-separated rows;
            # the first row holds the column headers
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 29 | 0 |
def upper(word: str) -> str:
    """
    Convert an entire ASCII string to uppercase letters.

    >>> upper("wow")
    'WOW'
    >>> upper("Hello World")
    'HELLO WORLD'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 74 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
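# Lazy-import note: replacing the module in `sys.modules` with a `_LazyModule` proxy
# means `from transformers.models.glpn import GLPNModel` defers the torch-backed import
# until the attribute is first touched, keeping `import transformers` cheap when the
# optional backends are absent.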
| 75 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """
    Print Pascal's triangle for the given number of rows.
    """
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """
    Create Pascal's triangle as a nested list of rows.

    >>> generate_pascal_triangle(3)
    [[1], [1, 1], [1, 2, 1]]
    >>> generate_pascal_triangle(0)
    []
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """
    >>> populate_current_row([[1]], 1)
    [1, 1]
    """
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """
    Same output as `generate_pascal_triangle`, but each row only computes the first
    half of its elements and mirrors them.

    >>> generate_pascal_triangle_optimized(3)
    [[1], [1, 1], [1, 2, 1]]
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
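# Worked example of the optimized construction above (row_index = 4):
#     previous row  -> [1, 3, 3, 1]
#     padded        -> [0, 1, 3, 3, 1, 0]
#     pairwise sums -> [1, 4, 6]        # only ceil(5 / 2) = 3 distinct values computed
#     mirrored      -> [1, 4, 6, 4, 1]
# Exploiting the row symmetry roughly halves the additions per row.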
def benchmark() -> None:
    """
    Benchmark both triangle generators over a range of input sizes.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
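# Shape note: timm fuses query/key/value into one `qkv` projection of shape
# (3 * hidden_size, hidden_size); the slices above peel off the first, middle and last
# `hidden_size` rows (or bias entries) as the separate q, k and v parameters expected
# by the Hugging Face layout.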
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our YOLOS structure.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
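# --- Usage sketch (added; not part of the original script) --------------------
# A minimal example of driving the conversion programmatically instead of via
# the CLI above. The checkpoint and output paths are hypothetical placeholders.
def _example_convert_yolos_locally():
    convert_yolos_checkpoint(
        "yolos_s_200_pre",  # one of the names listed in the --yolos_name help
        "checkpoints/yolos_s_200_pre.pth",  # hypothetical local .pth file
        "converted/yolos-small",  # hypothetical output directory
        False,  # push_to_hub
    )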
| 76 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48145466, 0.4578275, 0.40821073],
'''image_std''': [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
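# --- Usage sketch (added) ------------------------------------------------------
# The pattern the tests above exercise, condensed: wrap a tokenizer and an
# EfficientNet image processor in an AlignProcessor and run a joint call. The
# arguments are fixtures like the ones built in setUp above.
def _example_align_processor(tokenizer, image_processor, image):
    processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
    inputs = processor(text="lower newer", images=image, return_tensors="pt")
    # Expected keys: input_ids, token_type_ids, attention_mask, pixel_values
    return inputs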
| 29 | 0 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a__ :
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Model type selected in the list: " + ", ".join(__magic_name__ )} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
lowercase_ = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowercase_ = field(
default=1_2_8 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
lowercase_ = field(
default=64 , metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
lowercase_ = field(
default=30 , metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowercase_ = field(
default=__magic_name__ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
lowercase_ = field(
default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
lowercase_ = field(
default=2_0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
lowercase_ = field(
default=0 , metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
lowercase_ = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class a__ ( __magic_name__ ):
lowercase_ = "train"
lowercase_ = "dev"
class a__ ( __magic_name__ ):
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
def __init__( self : Tuple , UpperCamelCase_ : SquadDataTrainingArguments , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Union[str, Split] = Split.train , UpperCamelCase_ : Optional[bool] = False , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[str] = "pt" , ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = args
__UpperCAmelCase : Any = is_language_sensitive
__UpperCAmelCase : Optional[Any] = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(UpperCamelCase_ , UpperCamelCase_):
try:
__UpperCAmelCase : Any = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name")
__UpperCAmelCase : Optional[int] = mode
# Load data features from cache or dataset file
__UpperCAmelCase : int = "v2" if args.version_2_with_negative else "v1"
__UpperCAmelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# so that the others can use the cache.
__UpperCAmelCase : Optional[int] = cached_features_file + ".lock"
with FileLock(UpperCamelCase_):
if os.path.exists(UpperCamelCase_) and not args.overwrite_cache:
__UpperCAmelCase : Union[str, Any] = time.time()
__UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase_)
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__UpperCAmelCase : Optional[Any] = self.old_features["features"]
__UpperCAmelCase : int = self.old_features.get("dataset" , UpperCamelCase_)
__UpperCAmelCase : Tuple = self.old_features.get("examples" , UpperCamelCase_)
logger.info(
F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start)
if self.dataset is None or self.examples is None:
logger.warning(
F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
" future run")
else:
if mode == Split.dev:
__UpperCAmelCase : Union[str, Any] = self.processor.get_dev_examples(args.data_dir)
else:
__UpperCAmelCase : List[str] = self.processor.get_train_examples(args.data_dir)
__UpperCAmelCase , __UpperCAmelCase : int = squad_convert_examples_to_features(
examples=self.examples , tokenizer=UpperCamelCase_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=UpperCamelCase_ , )
__UpperCAmelCase : Union[str, Any] = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , UpperCamelCase_ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
def __len__( self : Union[str, Any]):
"""simple docstring"""
return len(self.features)
def __getitem__( self : int , UpperCamelCase_ : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.features[i]
__UpperCAmelCase : List[Any] = torch.tensor(feature.input_ids , dtype=torch.long)
__UpperCAmelCase : Tuple = torch.tensor(feature.attention_mask , dtype=torch.long)
__UpperCAmelCase : int = torch.tensor(feature.token_type_ids , dtype=torch.long)
__UpperCAmelCase : str = torch.tensor(feature.cls_index , dtype=torch.long)
__UpperCAmelCase : Optional[int] = torch.tensor(feature.p_mask , dtype=torch.float)
__UpperCAmelCase : List[Any] = torch.tensor(feature.is_impossible , dtype=torch.float)
__UpperCAmelCase : str = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask})
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible})
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa) * self.args.lang_id)})
if self.mode == Split.train:
__UpperCAmelCase : Tuple = torch.tensor(feature.start_position , dtype=torch.long)
__UpperCAmelCase : List[Any] = torch.tensor(feature.end_position , dtype=torch.long)
inputs.update({"start_positions": start_positions, "end_positions": end_positions})
return inputs
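# --- Usage sketch (added) ------------------------------------------------------
# Building a training dataset with the class above (exported upstream as
# ``SquadDataset``). The data directory is a hypothetical placeholder that must
# contain the SQuAD json files; field names follow the upstream
# ``SquadDataTrainingArguments`` dataclass referenced in the __init__ annotation.
def _example_build_squad_dataset(dataset_cls, tokenizer, data_dir="squad_data"):
    args = SquadDataTrainingArguments(model_type="bert", data_dir=data_dir)
    dataset = dataset_cls(args, tokenizer)  # mode defaults to Split.train
    return dataset[0]  # dict with input_ids, attention_mask, token_type_ids, ...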
| 77 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCamelCase_ = '''large''' in model_name or '''huge''' in model_name
lowerCamelCase_ = '''large''' in model_name or '''huge''' in model_name
lowerCamelCase_ = '''large''' in model_name or '''huge''' in model_name
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
lowerCamelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCamelCase_ = [4, 4, 4, 4]
lowerCamelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCamelCase_ = [3, 3, 3, 3]
else:
lowerCamelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCamelCase_ = 96
elif "small" in model_name:
lowerCamelCase_ = 96
elif "base" in model_name:
lowerCamelCase_ = 128
elif "large" in model_name:
lowerCamelCase_ = 192
elif "xlarge" in model_name:
lowerCamelCase_ = 256
elif "huge" in model_name:
lowerCamelCase_ = 352
# set label information
lowerCamelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCamelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCamelCase_ = '''imagenet-1k-id2label.json'''
lowerCamelCase_ = json.load(open(hf_hub_download(lowerCAmelCase__ ,lowerCAmelCase__ ,repo_type='''dataset''' ) ,'''r''' ) )
lowerCamelCase_ = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
lowerCamelCase_ = FocalNetConfig(
embed_dim=lowerCAmelCase__ ,depths=lowerCAmelCase__ ,focal_levels=lowerCAmelCase__ ,focal_windows=lowerCAmelCase__ ,use_conv_embed=lowerCAmelCase__ ,id2label=lowerCAmelCase__ ,label2id=lowerCAmelCase__ ,use_post_layernorm=lowerCAmelCase__ ,use_layerscale=lowerCAmelCase__ ,)
return config
def lowercase ( lowerCAmelCase__ ):
if "patch_embed.proj" in name:
lowerCamelCase_ = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowerCamelCase_ = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
lowerCamelCase_ = '''encoder.''' + name
if "encoder.layers" in name:
lowerCamelCase_ = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
lowerCamelCase_ = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
lowerCamelCase_ = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowerCamelCase_ = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowerCamelCase_ = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowerCamelCase_ = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
lowerCamelCase_ = '''layernorm.weight'''
if name == "norm.bias":
lowerCamelCase_ = '''layernorm.bias'''
if "head" in name:
lowerCamelCase_ = name.replace('''head''' ,'''classifier''' )
else:
lowerCamelCase_ = '''focalnet.''' + name
return name
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ):
# fmt: off
lowerCamelCase_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowerCamelCase_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,lowerCAmelCase__ )
lowerCamelCase_ = torch.hub.load_state_dict_from_url(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase_ = state_dict.pop(lowerCAmelCase__ )
lowerCamelCase_ = val
lowerCamelCase_ = get_focalnet_config(lowerCAmelCase__ )
lowerCamelCase_ = FocalNetForImageClassification(lowerCAmelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# verify conversion
lowerCamelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ = BitImageProcessor(
do_resize=lowerCAmelCase__ ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCAmelCase__ ,crop_size=224 ,do_normalize=lowerCAmelCase__ ,image_mean=IMAGENET_DEFAULT_MEAN ,image_std=IMAGENET_DEFAULT_STD ,)
lowerCamelCase_ = Image.open(requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ).raw )
lowerCamelCase_ = processor(images=lowerCAmelCase__ ,return_tensors='''pt''' )
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
lowerCamelCase_ = image_transforms(lowerCAmelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,lowerCAmelCase__ ,atol=1E-4 )
lowerCamelCase_ = model(**lowerCAmelCase__ )
lowerCamelCase_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.id2label[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
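# --- Usage sketch (added) ------------------------------------------------------
# Programmatic equivalent of the CLI above; weights are fetched from the URLs
# in ``model_name_to_url``. The output directory is a hypothetical placeholder.
def _example_convert_focalnet_locally():
    convert_focalnet_checkpoint("focalnet-tiny", "converted/focalnet-tiny", False)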
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_git'] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
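# --- Behaviour note (added sketch) ----------------------------------------------
# With the _LazyModule in place, importing this package stays cheap: the
# torch-backed symbols listed under 'modeling_git' are only imported on first
# attribute access, e.g. (illustrative):
#
#     from transformers.models.git import GitConfig  # light: config module only
#     from transformers.models.git import GitModel   # triggers the modeling import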
| 78 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
lowerCamelCase_ = {}
lowerCamelCase_ = {}
for i, value in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = i
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(UpperCAmelCase , UpperCAmelCase , ensure_ascii=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
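# --- Usage sketch (added) ------------------------------------------------------
# What RoCBertTokenizer adds on top of a plain BERT tokenizer: every token is
# mapped into three id spaces (token ids, shape ids, pronunciation ids), as the
# tests above assert. ``tokenizer`` is a fixture like the one built in setUp.
def _example_rocbert_id_spaces(tokenizer):
    tokens = tokenizer.tokenize("你好")
    return (
        tokenizer.convert_tokens_to_ids(tokens),
        tokenizer.convert_tokens_to_shape_ids(tokens),
        tokenizer.convert_tokens_to_pronunciation_ids(tokens),
    )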
| 29 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=18 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=None , ):
UpperCAmelCase__ : Optional[int] = size if size is not None else {"""shortest_edge""": 18}
UpperCAmelCase__ : Any = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
UpperCAmelCase__ : Union[str, Any] = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Optional[int] = num_channels
UpperCAmelCase__ : Optional[Any] = num_frames
UpperCAmelCase__ : List[str] = image_size
UpperCAmelCase__ : List[str] = min_resolution
UpperCAmelCase__ : List[str] = max_resolution
UpperCAmelCase__ : List[str] = do_resize
UpperCAmelCase__ : List[Any] = size
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : List[Any] = image_mean
UpperCAmelCase__ : Union[str, Any] = image_std
UpperCAmelCase__ : List[str] = crop_size
def __UpperCAmelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ):
__lowerCamelCase = VivitImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = VivitImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
UpperCAmelCase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __UpperCAmelCase ( self ):
# Initialize image_processing
UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
UpperCAmelCase__ : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
UpperCAmelCase__ : Tuple = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase__ : str = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __UpperCAmelCase ( self ):
# Initialize image_processing
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
UpperCAmelCase__ : List[str] = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase__ : int = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __UpperCAmelCase ( self ):
# Initialize image_processing
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : str = image_processing(video_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase__ : str = image_processing(_lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
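# --- Shape contract (added sketch) ----------------------------------------------
# The invariant all three tests above assert, condensed: for a batch of videos
# the processor returns ``pixel_values`` of shape
# (batch_size, num_frames, num_channels, crop_height, crop_width).
def _example_vivit_output_shape(image_processing, videos):
    pixel_values = image_processing(videos, return_tensors="pt").pixel_values
    return pixel_values.shape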
| 79 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotations for that word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank that is part of the OntoNotes release.
5 Part-of-Speech
6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, with the word/part-of-speech leaf replaced by a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11	Named Entities	These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information about the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=True ,lowerCAmelCase__=False ,lowerCAmelCase__="dummy_doc" ):
lowerCamelCase_ = {doc: key_lines}
lowerCamelCase_ = {doc: sys_lines}
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = reader.get_doc_mentions(lowerCAmelCase__ ,sys_doc_lines[doc] ,lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowerCamelCase_ = reader.set_annotated_parse_trees(lowerCAmelCase__ ,key_doc_lines[doc] ,lowerCAmelCase__ ,lowerCAmelCase__ )
if remove_nested:
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowerCamelCase_ , lowerCamelCase_ = reader.remove_nested_coref_mentions(lowerCAmelCase__ ,lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = reader.get_mention_assignments(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = get_coref_infos(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for name, metric in metrics:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = evaluator.evaluate_documents(lowerCAmelCase__ ,lowerCAmelCase__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
lowerCamelCase_ = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
lowerCamelCase_ = line.split()[5]
if not parse_col == "-":
lowerCamelCase_ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False ):
lowerCamelCase_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
lowerCamelCase_ = util.check_gold_parse_annotation(UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowerCamelCase_ = evaluate(
key_lines=UpperCAmelCase , sys_lines=UpperCAmelCase , metrics=UpperCAmelCase , NP_only=UpperCAmelCase , remove_nested=UpperCAmelCase , keep_singletons=UpperCAmelCase , min_span=UpperCAmelCase , )
return score
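# --- Usage sketch (added) --------------------------------------------------------
# Computing the metric with non-default options (``words`` as in the docstring
# example above; option names follow the argument descriptions):
#
#     results = coval.compute(
#         predictions=[words],
#         references=[words],
#         keep_singletons=False,  # drop size-one chains before scoring
#         NP_only=True,           # restrict evaluation to NP mentions
#     )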
| 29 | 0 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase = 10
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = [1, 2, 3, 4]
__lowercase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_lowerCAmelCase , self.block_size , 0 ) , _lowerCAmelCase )
def _a ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__lowercase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowerCAmelCase , self.block_size , 0 ) , _lowerCAmelCase )
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__lowercase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_lowerCAmelCase , self.block_size , 0 ) , _lowerCAmelCase )
def _a ( self : int ) -> str:
"""simple docstring"""
__lowercase = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
__lowercase , __lowercase = process_story(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , [] )
def _a ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = """"""
__lowercase , __lowercase = process_story(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , [] )
self.assertEqual(_lowerCAmelCase , [] )
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
__lowercase , __lowercase = process_story(_lowerCAmelCase )
__lowercase = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = ["""It was the best of times."""]
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Any ) -> Any:
"""simple docstring"""
__lowercase = torch.tensor([1, 2, 3, 4] )
__lowercase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_lowerCAmelCase , 0 ).numpy() , expected.numpy() )
def _a ( self : str ) -> Any:
"""simple docstring"""
__lowercase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowercase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowerCAmelCase , 23 ).numpy() , expected.numpy() )
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowercase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_lowerCAmelCase , 1 ).numpy() , expected.numpy() )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = 101
__lowercase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowercase = compute_token_type_ids(_lowerCAmelCase , _lowerCAmelCase )
np.testing.assert_array_equal(_lowerCAmelCase , _lowerCAmelCase )
| 80 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# the custom attribute was saved and is restored on load
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
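# Standard transformers lazy-import layout: the mapping below declares the public names,
# and the _LazyModule installed at the bottom of the file resolves them on first access.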
_snake_case : str = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[Any] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[int] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[Any] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_snake_case : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 81 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 29 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ = 50 ):
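# Bottom-up DP: ways_number[k] counts the tilings of a length-k row (base case: all unit
# squares); summing over the offset and length of the first coloured tile adds the
# tilings of the remaining suffix.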
UpperCAmelCase_ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
| 82 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
"""simple docstring"""
def snake_case_ ( ):
'''Yield the triangle numbers 1, 3, 6, 10, ...'''
for n in range(1, 1_00_00_00 ):
yield n * (n + 1) // 2
def snake_case_ ( A_ : int ):
'''Count the divisors of n from its prime factorisation (product of multiplicity + 1).'''
_lowerCamelCase : Union[str, Any] = 1
_lowerCamelCase : Dict = 2
while i * i <= n:
_lowerCamelCase : Dict = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def snake_case_ ( ):
'''Return the first triangle number with more than 500 divisors.'''
return next(i for i in triangle_number_generator() if count_divisors(A_ ) > 5_00 )
if __name__ == "__main__":
print(solution())
| 83 |
"""simple docstring"""
import math
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [True] * n
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
lowerCamelCase_ = i * 2
while index < n:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 ,lowerCAmelCase__ ,2 ):
if is_prime[i]:
primes.append(lowerCAmelCase__ )
return primes
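# For each pair of consecutive primes p < q, the loop below sums the numbers strictly
# between p**2 and q**2 (capped at the limit) that are divisible by exactly one of p and q.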
def lowercase ( lowerCAmelCase__ = 999_966_663_333 ):
lowerCamelCase_ = math.floor(math.sqrt(lowerCAmelCase__ ) ) + 100
lowerCamelCase_ = prime_sieve(lowerCAmelCase__ )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = primes[prime_index]
while (last_prime**2) <= limit:
lowerCamelCase_ = primes[prime_index + 1]
lowerCamelCase_ = last_prime**2
lowerCamelCase_ = next_prime**2
# Add the numbers strictly between p**2 and q**2 (and not above the limit) divisible by p = last_prime
lowerCamelCase_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by q = next_prime, walking down from q**2 toward p**2
lowerCamelCase_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both p and q, which the two passes above each added once
lowerCamelCase_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Subtract twice: the number was added once by each of the two passes above
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
lowerCamelCase_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 29 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = F'''{sampling_rate}'''
lowercase = '1'
lowercase = 'f32le'
lowercase = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
lowercase = output_stream[0]
lowercase = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "f32le" , ):
lowercase = F'''{sampling_rate}'''
lowercase = '1'
if format_for_conversion == "s16le":
lowercase = 2
elif format_for_conversion == "f32le":
lowercase = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
lowercase = platform.system()
if system == "Linux":
lowercase = 'alsa'
lowercase = 'default'
elif system == "Darwin":
lowercase = 'avfoundation'
lowercase = ':0'
elif system == "Windows":
lowercase = 'dshow'
lowercase = 'default'
lowercase = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "f32le" , ):
if stream_chunk_s is not None:
lowercase = stream_chunk_s
else:
lowercase = chunk_length_s
lowercase = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
lowercase = np.intaa
lowercase = 2
elif format_for_conversion == "f32le":
lowercase = np.floataa
lowercase = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
lowercase = chunk_length_s / 6
lowercase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
lowercase = [stride_length_s, stride_length_s]
lowercase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase = datetime.datetime.now()
lowercase = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
lowercase = np.frombuffer(item['raw'] , dtype=__SCREAMING_SNAKE_CASE )
lowercase = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
lowercase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We have fallen behind real time; skip this chunk
continue
yield item
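# chunk_bytes_iter re-chunks a raw byte stream into fixed-size windows that overlap by
# (stride_left, stride_right) bytes, so a downstream model sees context on both sides
# of every chunk.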
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ):
lowercase = b''
lowercase , lowercase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
lowercase = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
lowercase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
lowercase = (_stride_left, stride_right)
lowercase = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
lowercase = False
yield item
lowercase = stride_left
lowercase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
lowercase = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
lowercase = False
yield item
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = 2**24 # 16 MB read buffer
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
lowercase = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 84 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
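# Recorded byte sizes can drift slightly across environments, so num_bytes values are
# compared with a 1% relative tolerance.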
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
assert result == expected
| 29 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = 'perceiver'
def __init__( self : List[Any] , a_ : List[Any]=256 , a_ : List[Any]=1280 , a_ : str=768 , a_ : List[Any]=1 , a_ : Tuple=26 , a_ : Optional[Any]=8 , a_ : str=8 , a_ : int=None , a_ : Dict=None , a_ : Dict="kv" , a_ : List[Any]=1 , a_ : Dict=1 , a_ : Dict="gelu" , a_ : Dict=0.1 , a_ : Optional[Any]=0.02 , a_ : Dict=1e-1_2 , a_ : Union[str, Any]=True , a_ : Optional[int]=262 , a_ : str=2048 , a_ : List[str]=56 , a_ : Dict=[368, 496] , a_ : int=16 , a_ : str=1920 , a_ : List[str]=16 , a_ : List[str]=[1, 16, 224, 224] , **a_ : Union[str, Any] , )-> Tuple:
"""simple docstring"""
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : Optional[int] = num_latents
SCREAMING_SNAKE_CASE__ : Dict = d_latents
SCREAMING_SNAKE_CASE__ : Optional[Any] = d_model
SCREAMING_SNAKE_CASE__ : str = num_blocks
SCREAMING_SNAKE_CASE__ : Tuple = num_self_attends_per_block
SCREAMING_SNAKE_CASE__ : List[str] = num_self_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = num_cross_attention_heads
SCREAMING_SNAKE_CASE__ : Any = qk_channels
SCREAMING_SNAKE_CASE__ : Tuple = v_channels
SCREAMING_SNAKE_CASE__ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE__ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE__ : Dict = cross_attention_widening_factor
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE__ : List[Any] = image_size
# flow attributes
SCREAMING_SNAKE_CASE__ : Dict = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE__ : Tuple = num_frames
SCREAMING_SNAKE_CASE__ : int = audio_samples_per_frame
SCREAMING_SNAKE_CASE__ : Union[str, Any] = samples_per_patch
SCREAMING_SNAKE_CASE__ : Optional[int] = output_shape
class snake_case ( UpperCamelCase_ ):
@property
def __lowercase( self : str )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE__ : List[str] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def __lowercase( self : Tuple )-> float:
"""simple docstring"""
return 1e-4
def __lowercase( self : List[str] , a_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , a_ : int = -1 , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , a_ : int = 3 , a_ : int = 40 , a_ : int = 40 , )-> Mapping[str, Any]:
"""simple docstring"""
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(a_ , a_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : List[Any] = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : Any = preprocessor.num_special_tokens_to_add(a_ )
SCREAMING_SNAKE_CASE__ : str = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [' '.join(['a'] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ : Tuple = dict(preprocessor(a_ , return_tensors=a_ ) )
SCREAMING_SNAKE_CASE__ : Dict = inputs.pop('input_ids' )
return inputs
elif isinstance(a_ , a_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : Dict = compute_effective_axis_dimension(a_ , fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._generate_dummy_images(a_ , a_ , a_ , a_ )
SCREAMING_SNAKE_CASE__ : Tuple = dict(preprocessor(images=a_ , return_tensors=a_ ) )
SCREAMING_SNAKE_CASE__ : str = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 85 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one).
The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level.
The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system.
This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 29 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__a :int = '\\n\n'
__a :Any = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
__a :List[str] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
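# Perplexity of a tokenised sequence x_1 .. x_t is exp(-(1/t) * sum_i log p(x_i | x_<i));
# the metric below computes this batch-wise from a per-token cross-entropy loss.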
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def __A ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int = 16 , UpperCAmelCase : bool = True , UpperCAmelCase : List[Any]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
A_ = "cuda"
else:
A_ = "cuda" if torch.cuda.is_available() else "cpu"
A_ = AutoModelForCausalLM.from_pretrained(UpperCAmelCase )
A_ = model.to(UpperCAmelCase )
A_ = AutoTokenizer.from_pretrained(UpperCAmelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
A_ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(UpperCAmelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
A_ = model.config.max_length - 1
else:
A_ = model.config.max_length
A_ = tokenizer(
UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors="pt" , return_attention_mask=UpperCAmelCase , ).to(UpperCAmelCase )
A_ = encodings["input_ids"]
A_ = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
A_ = []
A_ = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(UpperCAmelCase ) , UpperCAmelCase ) ):
A_ = min(start_index + batch_size , len(UpperCAmelCase ) )
A_ = encoded_texts[start_index:end_index]
A_ = attn_masks[start_index:end_index]
if add_start_token:
A_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCAmelCase )
A_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
A_ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(UpperCAmelCase ), attn_mask] , dim=1 )
A_ = encoded_batch
with torch.no_grad():
A_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase ).logits
A_ = out_logits[..., :-1, :].contiguous()
A_ = labels[..., 1:].contiguous()
A_ = attn_mask[..., 1:].contiguous()
A_ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , UpperCAmelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCAmelCase )} | 86 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[list[float]]:
"""simple docstring"""
A__ = []
for data in source_data:
for i, el in enumerate(lowercase_ ):
if len(lowercase_ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(lowercase_ ) )
return data_lists
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> list[list[float]]:
"""simple docstring"""
A__ = []
for dlist, weight in zip(lowercase_ , lowercase_ ):
A__ = min(lowercase_ )
A__ = max(lowercase_ )
A__ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
A__ = f"""Invalid weight of {weight:f} provided"""
raise ValueError(lowercase_ )
score_lists.append(lowercase_ )
return score_lists
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[float]:
"""simple docstring"""
A__ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(lowercase_ ):
A__ = final_scores[j] + ele
return final_scores
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> list[list[float]]:
"""simple docstring"""
A__ = get_data(lowercase_ )
A__ = calculate_each_score(lowercase_ , lowercase_ )
A__ = generate_final_scores(lowercase_ )
# append scores to source data
for i, ele in enumerate(lowercase_ ):
source_data[i].append(lowercase_ )
return source_data
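# Usage sketch (hypothetical values): scoring [[20, 60], [10, 70]] with weights [0, 1]
# appends 0 to the first row and 2 to the second -- weight 0 rewards the lower value in
# a column, weight 1 the higher one.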
| 87 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
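# top_p=0.8 keeps the smallest set of tokens whose cumulative mass reaches 0.8:
# row 0 keeps {0.5, 0.3} (sum 0.8) and row 1 keeps {0.3, 0.3, 0.25} (sum 0.85).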
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 29 | 0 |
"""simple docstring"""
from __future__ import annotations
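# XOR with a fixed key is an involution -- (ch ^ key) ^ key == ch -- which is why the
# encrypt and decrypt methods below share one and the same implementation.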
class lowercase__ :
def __init__( self , SCREAMING_SNAKE_CASE = 0) -> Optional[int]:
_lowerCamelCase : Dict = key
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> list[str]:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(SCREAMING_SNAKE_CASE) ^ key) for ch in content]
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> list[str]:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[int] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(SCREAMING_SNAKE_CASE) ^ key) for ch in content]
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0) -> str:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : str = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(SCREAMING_SNAKE_CASE) ^ key)
return ans
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0) -> str:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
_lowerCamelCase : Optional[Any] = """"""
for ch in content:
ans += chr(ord(SCREAMING_SNAKE_CASE) ^ key)
return ans
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0) -> bool:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
try:
with open(SCREAMING_SNAKE_CASE) as fin, open("""encrypt.out""" , """w+""") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
except OSError:
return False
return True
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> bool:
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
try:
with open(SCREAMING_SNAKE_CASE) as fin, open("""decrypt.out""" , """w+""") as fout:
                # actual decrypt-process
for line in fin:
fout.write(self.decrypt_string(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 88 |
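For reference, a standalone sketch of the XOR round-trip the row above implements (the row's own class is not runnable as transformed); the function name is hypothetical.

def xor_string(content: str, key: int) -> str:
    # XOR is self-inverse, so the same function encrypts and decrypts
    key %= 255  # mirrors the key reduction in the row above
    return "".join(chr(ord(ch) ^ key) for ch in content)

assert xor_string(xor_string("hallo welt", 67), 67) == "hallo welt"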
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowercase ( lowerCAmelCase__ ):
def wrapper(*lowerCAmelCase__ ,**lowerCAmelCase__ ):
lowerCamelCase_ = timeit.default_timer()
lowerCamelCase_ = func(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCamelCase_ = timeit.default_timer() - starttime
return delta
lowerCamelCase_ = func.__name__
return wrapper
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = []
lowerCamelCase_ = seq_shapes or {}
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase__ ,_ArrayXD ):
lowerCamelCase_ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase__ ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase_ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase_ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase__ ,datasets.Sequence ):
while isinstance(lowerCAmelCase__ ,datasets.Sequence ):
lowerCamelCase_ = v.feature
lowerCamelCase_ = seq_shapes[k]
lowerCamelCase_ = np.random.rand(*lowerCAmelCase__ ).astype(v.dtype )
lowerCamelCase_ = data
dummy_data.append((i, example) )
return dummy_data
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = generate_examples(lowerCAmelCase__ ,num_examples=lowerCAmelCase__ ,seq_shapes=lowerCAmelCase__ )
with ArrowWriter(features=lowerCAmelCase__ ,path=lowerCAmelCase__ ) as writer:
for key, record in dummy_data:
lowerCamelCase_ = features.encode_example(lowerCAmelCase__ )
writer.write(lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCamelCase_ = datasets.Dataset.from_file(filename=lowerCAmelCase__ ,info=datasets.DatasetInfo(features=lowerCAmelCase__ ) )
return dataset
| 29 | 0 |
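A standalone sketch of the timing-decorator pattern used in the benchmark row above; the helper name is hypothetical.

import timeit

def timed(func):
    # returns elapsed seconds instead of func's result, as the row's wrapper does
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start
    wrapper.__name__ = func.__name__
    return wrapper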
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Optional[int] = fname.split(os.path.sep )[-1]
return re.search(R'^(.*)_\d+\.jpg$' , lowerCamelCase_ ).groups()[0]
class _lowerCamelCase( _a ):
def __init__( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None) -> List[str]:
"""simple docstring"""
_lowercase : int = file_names
_lowercase : List[str] = image_transform
_lowercase : List[Any] = label_to_id
def __len__( self) -> str:
"""simple docstring"""
return len(self.file_names)
def __getitem__( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Tuple = self.file_names[idx]
_lowercase : Tuple = PIL.Image.open(lowerCamelCase)
_lowercase : Any = raw_image.convert('RGB')
if self.image_transform is not None:
_lowercase : Dict = self.image_transform(lowerCamelCase)
_lowercase : Dict = extract_label(lowerCamelCase)
if self.label_to_id is not None:
_lowercase : List[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
# Initialize accelerator
if args.with_tracking:
_lowercase : int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
_lowercase : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowercase : List[str] = config['lr']
_lowercase : Optional[int] = int(config['num_epochs'] )
_lowercase : Union[str, Any] = int(config['seed'] )
_lowercase : Dict = int(config['batch_size'] )
_lowercase : List[str] = config['image_size']
if not isinstance(lowerCamelCase_ , (list, tuple) ):
_lowercase : List[str] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
_lowercase : Optional[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
_lowercase : int = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
_lowercase : Union[str, Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
_lowercase : Tuple = os.path.split(lowerCamelCase_ )[-1].split('.' )[0]
accelerator.init_trackers(lowerCamelCase_ , lowerCamelCase_ )
# Grab all the image filenames
_lowercase : str = [os.path.join(args.data_dir , lowerCamelCase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
_lowercase : List[str] = [extract_label(lowerCamelCase_ ) for fname in file_names]
_lowercase : Dict = list(set(lowerCamelCase_ ) )
id_to_label.sort()
_lowercase : Optional[int] = {lbl: i for i, lbl in enumerate(lowerCamelCase_ )}
# Set the seed before splitting the data.
np.random.seed(lowerCamelCase_ )
torch.manual_seed(lowerCamelCase_ )
torch.cuda.manual_seed_all(lowerCamelCase_ )
# Split our filenames between train and validation
_lowercase : Optional[Any] = np.random.permutation(len(lowerCamelCase_ ) )
_lowercase : str = int(0.8 * len(lowerCamelCase_ ) )
_lowercase : Any = random_perm[:cut]
_lowercase : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
_lowercase : Optional[int] = Compose([RandomResizedCrop(lowerCamelCase_ , scale=(0.5, 1.0) ), ToTensor()] )
_lowercase : Optional[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=lowerCamelCase_ , label_to_id=lowerCamelCase_ )
# For evaluation, we use a deterministic Resize
_lowercase : Union[str, Any] = Compose([Resize(lowerCamelCase_ ), ToTensor()] )
_lowercase : Optional[Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=lowerCamelCase_ , label_to_id=lowerCamelCase_ )
# Instantiate dataloaders.
_lowercase : List[str] = DataLoader(lowerCamelCase_ , shuffle=lowerCamelCase_ , batch_size=lowerCamelCase_ , num_workers=4 )
_lowercase : List[Any] = DataLoader(lowerCamelCase_ , shuffle=lowerCamelCase_ , batch_size=lowerCamelCase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowercase : List[str] = create_model('resnet50d' , pretrained=lowerCamelCase_ , num_classes=len(lowerCamelCase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowercase : Union[str, Any] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
_lowercase : Dict = False
for param in model.get_classifier().parameters():
_lowercase : Any = True
# We normalize the batches of images to be a bit faster.
_lowercase : int = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
_lowercase : Union[str, Any] = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
_lowercase : int = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
_lowercase : List[Any] = OneCycleLR(optimizer=lowerCamelCase_ , max_lr=lowerCamelCase_ , epochs=lowerCamelCase_ , steps_per_epoch=len(lowerCamelCase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = accelerator.prepare(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# We need to keep track of how many total steps we have iterated over
_lowercase : Optional[int] = 0
# We also need to keep track of the starting epoch so files are named properly
_lowercase : List[Any] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
_lowercase : Optional[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
_lowercase : List[str] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
_lowercase : str = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
_lowercase : Union[str, Any] = os.path.splitext(lowerCamelCase_ )[0]
if "epoch" in training_difference:
_lowercase : Any = int(training_difference.replace('epoch_' , '' ) ) + 1
_lowercase : Optional[Any] = None
else:
_lowercase : Dict = int(training_difference.replace('step_' , '' ) )
_lowercase : Any = resume_step // len(lowerCamelCase_ )
resume_step -= starting_epoch * len(lowerCamelCase_ )
# Now we train the model
for epoch in range(lowerCamelCase_ , lowerCamelCase_ ):
model.train()
if args.with_tracking:
_lowercase : Tuple = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
_lowercase : Union[str, Any] = accelerator.skip_first_batches(lowerCamelCase_ , lowerCamelCase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
_lowercase : Any = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowercase : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowercase : List[Any] = (batch['image'] - mean) / std
_lowercase : Optional[int] = model(lowerCamelCase_ )
_lowercase : Union[str, Any] = torch.nn.functional.cross_entropy(lowerCamelCase_ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(lowerCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : Tuple = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
_lowercase : Tuple = os.path.join(args.output_dir , lowerCamelCase_ )
accelerator.save_state(lowerCamelCase_ )
model.eval()
_lowercase : List[str] = 0
_lowercase : List[Any] = 0
for step, batch in enumerate(lowerCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
_lowercase : Union[str, Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
_lowercase : List[Any] = (batch['image'] - mean) / std
with torch.no_grad():
_lowercase : Optional[Any] = model(lowerCamelCase_ )
_lowercase : Any = outputs.argmax(dim=-1 )
_lowercase , _lowercase : Optional[int] = accelerator.gather_for_metrics((predictions, batch['label']) )
_lowercase : int = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
_lowercase : Union[str, Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(lowerCamelCase_ ),
'epoch': epoch,
} , step=lowerCamelCase_ , )
if checkpointing_steps == "epoch":
_lowercase : Any = F'''epoch_{epoch}'''
if args.output_dir is not None:
_lowercase : Dict = os.path.join(args.output_dir , lowerCamelCase_ )
accelerator.save_state(lowerCamelCase_ )
if args.with_tracking:
accelerator.end_training()
def UpperCamelCase_( ) -> Optional[Any]:
_lowercase : Tuple = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=lowerCamelCase_ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
        '--mixed_precision' , type=lowerCamelCase_ , default=lowerCamelCase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=lowerCamelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=lowerCamelCase_ , default=lowerCamelCase_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
        '--project_dir' , type=lowerCamelCase_ , default='logs' , help='Location where to store experiment tracking logs and relevant project information' , )
_lowercase : str = parser.parse_args()
_lowercase : List[str] = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
main()
| 89 |
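A standalone check of the filename-to-label regex the training row above relies on; the example filename is an assumption.

import re

# the greedy group captures everything up to the final "_<digits>.jpg"
assert re.search(r"^(.*)_\d+\.jpg$", "great_pyrenees_173.jpg").groups()[0] == "great_pyrenees"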
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def lowercase ( ):
lowerCamelCase_ = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCamelCase_ = g.get_repo('''huggingface/accelerate''' )
lowerCamelCase_ = repo.get_issues(state='''open''' )
for issue in open_issues:
        lowerCamelCase_ = sorted([comment for comment in issue.get_comments()] ,key=lambda lowerCAmelCase__ : lowerCAmelCase__.created_at ,reverse=True )
lowerCamelCase_ = comments[0] if len(lowerCAmelCase__ ) > 0 else None
lowerCamelCase_ = dt.utcnow()
lowerCamelCase_ = (current_time - issue.updated_at).days
lowerCamelCase_ = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
            # Close the issue, since there have been 7 days of inactivity since the bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 0 |
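A minimal sketch of the staleness arithmetic in the bot row above, with no GitHub API involved; the thresholds mirror the row.

from datetime import datetime, timedelta

updated_at = datetime.utcnow() - timedelta(days=10)  # illustrative timestamps
created_at = datetime.utcnow() - timedelta(days=40)
now = datetime.utcnow()
is_stale = (now - updated_at).days > 7 and (now - created_at).days >= 30
assert is_stale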
'''simple docstring'''
import heapq
import sys
import numpy as np
__UpperCAmelCase = tuple[int, int]
class a__ :
'''simple docstring'''
def __init__( self ) -> Union[str, Any]:
lowerCAmelCase__ = []
lowerCAmelCase__ = set()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
return len(self.elements ) == 0
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(lowerCamelCase_ )
else:
# update
# print("update", item)
lowerCAmelCase__ = []
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tuple:
if item in self.set:
self.set.remove(lowerCamelCase_ )
lowerCAmelCase__ = []
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.elements[0][1]
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
((lowerCAmelCase__) , (lowerCAmelCase__)) = heapq.heappop(self.elements )
self.set.remove(lowerCamelCase_ )
return (priority, item)
def _snake_case ( A , A ) -> Tuple:
# euclidean distance
lowerCAmelCase__ = np.array(A )
lowerCAmelCase__ = np.array(A )
return np.linalg.norm(a - b )
def _snake_case ( A , A ) -> List[str]:
# integer division by time variable
return consistent_heuristic(A , A ) // t
def _snake_case ( A , A ) -> List[Any]:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _snake_case ( A , A , A , A ) -> str:
lowerCAmelCase__ = g_function[start] + Wa * heuristics[i](A , A )
return ans
def _snake_case ( A , A , A ) -> int:
lowerCAmelCase__ = np.chararray((n, n) )
for i in range(A ):
for j in range(A ):
lowerCAmelCase__ = '''*'''
for i in range(A ):
for j in range(A ):
if (j, (n - 1) - i) in blocks:
lowerCAmelCase__ = '''#'''
lowerCAmelCase__ = '''-'''
lowerCAmelCase__ = back_pointer[goal]
while x != start:
((lowerCAmelCase__) , (lowerCAmelCase__)) = x
# print(x)
lowerCAmelCase__ = '''-'''
lowerCAmelCase__ = back_pointer[x]
lowerCAmelCase__ = '''-'''
for i in range(A ):
for j in range(A ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
lowerCAmelCase__ = back_pointer[goal]
while x != start:
print(A , end=''' ''' )
lowerCAmelCase__ = back_pointer[x]
print(A )
sys.exit()
def _snake_case ( A ) -> Any:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _snake_case ( A , A , A , A , A , A , A , A , ) -> str:
for itera in range(A ):
open_list[itera].remove_element(A )
# print("s", s)
# print("j", j)
((lowerCAmelCase__) , (lowerCAmelCase__)) = s
lowerCAmelCase__ = (x - 1, y)
lowerCAmelCase__ = (x + 1, y)
lowerCAmelCase__ = (x, y + 1)
lowerCAmelCase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A )
lowerCAmelCase__ = -1
lowerCAmelCase__ = float('''inf''' )
if valid(A ) and g_function[neighbours] > g_function[s] + 1:
lowerCAmelCase__ = g_function[s] + 1
lowerCAmelCase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A , key(A , 0 , A , A ) )
if neighbours not in close_list_inad:
for var in range(1 , A ):
if key(A , A , A , A ) <= Wa * key(
A , 0 , A , A ):
open_list[j].put(
A , key(A , A , A , A ) )
def _snake_case ( ) -> str:
lowerCAmelCase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__UpperCAmelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__UpperCAmelCase = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__UpperCAmelCase = make_common_ground()
__UpperCAmelCase = blocks_blk
# hyper parameters
__UpperCAmelCase = 1
__UpperCAmelCase = 1
__UpperCAmelCase = 20
__UpperCAmelCase = 3 # one consistent and two other inconsistent
# start and end destination
__UpperCAmelCase = (0, 0)
__UpperCAmelCase = (n - 1, n - 1)
__UpperCAmelCase = 1
def _snake_case ( A , A , A ) -> Dict:
lowerCAmelCase__ = {start: 0, goal: float('''inf''' )}
lowerCAmelCase__ = {start: -1, goal: -1}
lowerCAmelCase__ = []
lowerCAmelCase__ = set()
for i in range(A ):
open_list.append(PriorityQueue() )
open_list[i].put(A , key(A , A , A , A ) )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , A ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(A , A , A )
else:
lowerCAmelCase__ , lowerCAmelCase__ = open_list[i].top_show()
visited.add(A )
expand_state(
A , A , A , A , A , A , A , A , )
close_list_inad.append(A )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(A , A , A )
else:
lowerCAmelCase__ = open_list[0].top_show()
visited.add(A )
expand_state(
A , 0 , A , A , A , A , A , A , )
close_list_anchor.append(A )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 90 |
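A standalone sketch of the Euclidean heuristic used by the multi-A* row above; the function name is hypothetical.

import numpy as np

def euclidean(a: tuple[int, int], b: tuple[int, int]) -> float:
    # straight-line distance between two grid cells
    return float(np.linalg.norm(np.array(a) - np.array(b)))

assert euclidean((0, 0), (3, 4)) == 5.0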
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ ,lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ , lowerCamelCase_ = emb.weight.shape
lowerCamelCase_ = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCamelCase_ = emb.weight.data
return lin_layer
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="facebook/mbart-large-en-ro" ,lowerCAmelCase__=False ,lowerCAmelCase__=False ):
lowerCamelCase_ = torch.load(lowerCAmelCase__ ,map_location='''cpu''' )['''model''']
remove_ignore_keys_(lowerCAmelCase__ )
lowerCamelCase_ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
lowerCamelCase_ = MBartConfig.from_pretrained(lowerCAmelCase__ ,vocab_size=lowerCAmelCase__ )
if mbart_aa and finetuned:
lowerCamelCase_ = '''relu'''
lowerCamelCase_ = state_dict['''decoder.embed_tokens.weight''']
lowerCamelCase_ = MBartForConditionalGeneration(lowerCAmelCase__ )
model.model.load_state_dict(lowerCAmelCase__ )
if finetuned:
lowerCamelCase_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ = parser.parse_args()
A_ = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 0 |
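A hedged sketch of the embedding-to-linear weight tying performed by the conversion row above; the dimensions are illustrative.

import torch
from torch import nn

emb = nn.Embedding(10, 4)            # vocab_size x emb_size
lin = nn.Linear(4, 10, bias=False)   # emb_size -> vocab_size
lin.weight.data = emb.weight.data    # tie the output projection to the embedding table
assert torch.equal(lin.weight, emb.weight)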
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _snake_case ( snake_case__ : BertModel , snake_case__ : str , snake_case__ : str ):
A = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
A = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(snake_case__ ):
os.makedirs(snake_case__ )
A = model.state_dict()
def to_tf_var_name(snake_case__ : str ):
for patt, repl in iter(snake_case__ ):
A = name.replace(snake_case__ , snake_case__ )
return F'bert/{name}'
def create_tf_var(snake_case__ : np.ndarray , snake_case__ : str , snake_case__ : tf.Session ):
A = tf.dtypes.as_dtype(tensor.dtype )
A = tf.get_variable(dtype=snake_case__ , shape=tensor.shape , name=snake_case__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(snake_case__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
A = to_tf_var_name(snake_case__ )
A = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
A = torch_tensor.T
A = create_tf_var(tensor=snake_case__ , name=snake_case__ , session=snake_case__ )
tf.keras.backend.set_value(snake_case__ , snake_case__ )
A = session.run(snake_case__ )
print(F'Successfully created {tf_name}: {np.allclose(snake_case__ , snake_case__ )}' )
A = tf.train.Saver(tf.trainable_variables() )
saver.save(snake_case__ , os.path.join(snake_case__ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def _snake_case ( snake_case__ : Tuple=None ):
A = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=snake_case__ , required=snake_case__ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=snake_case__ , required=snake_case__ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=snake_case__ , required=snake_case__ , help='Directory in which to save tensorflow model' )
A = parser.parse_args(snake_case__ )
A = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=snake_case__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
    main()
| 91 |
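A standalone sketch of the PyTorch-to-TensorFlow variable-name rewriting applied in the conversion row above; this pattern subset is an assumption for illustration.

patterns = (("layer.", "layer_"), (".", "/"), ("weight", "kernel"))
name = "encoder.layer.0.output.dense.weight"
for patt, repl in patterns:
    name = name.replace(patt, repl)
assert name == "encoder/layer_0/output/dense/kernel"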
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCamelCase_ = 16
UpperCamelCase_ = 32
def _lowerCAmelCase ( __magic_name__ : Accelerator , __magic_name__ : int = 16 ) -> Tuple:
lowercase : Tuple =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase : Tuple =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__magic_name__ : Dict ):
# max_length=None => use the model max length (it's actually the default)
lowercase : Optional[Any] =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__magic_name__ , max_length=__magic_name__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase : Optional[Any] =datasets.map(
__magic_name__ , batched=__magic_name__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase : Union[str, Any] =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__magic_name__ : Optional[int] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase : Optional[int] =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase : Union[str, Any] =16
elif accelerator.mixed_precision != "no":
lowercase : Any =8
else:
lowercase : Union[str, Any] =None
return tokenizer.pad(
__magic_name__ , padding='''longest''' , max_length=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase : Union[str, Any] =DataLoader(
tokenized_datasets['''train'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
lowercase : Dict =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__magic_name__ , collate_fn=__magic_name__ , batch_size=__magic_name__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCamelCase_ = mocked_dataloaders # noqa: F811
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : str ) -> int:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __magic_name__ ) == "1":
lowercase : List[Any] =2
# New Code #
lowercase : List[Any] =int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase : Optional[int] =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__magic_name__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : str =config['''lr''']
lowercase : Optional[int] =int(config['''num_epochs'''] )
lowercase : str =int(config['''seed'''] )
lowercase : Tuple =int(config['''batch_size'''] )
lowercase : Dict =evaluate.load('''glue''' , '''mrpc''' )
set_seed(__magic_name__ )
lowercase , lowercase : List[str] =get_dataloaders(__magic_name__ , __magic_name__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : List[str] =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__magic_name__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase : str =model.to(accelerator.device )
# Instantiate optimizer
lowercase : Tuple =AdamW(params=model.parameters() , lr=__magic_name__ )
# Instantiate scheduler
lowercase : Tuple =get_linear_schedule_with_warmup(
optimizer=__magic_name__ , num_warmup_steps=100 , num_training_steps=(len(__magic_name__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase : Dict =accelerator.prepare(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Now we train the model
for epoch in range(__magic_name__ ):
model.train()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__magic_name__ ):
lowercase : Dict =model(**__magic_name__ )
lowercase : Optional[Any] =output.loss
accelerator.backward(__magic_name__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__magic_name__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase : str =model(**__magic_name__ )
lowercase : str =outputs.logits.argmax(dim=-1 )
lowercase , lowercase : List[Any] =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__magic_name__ , references=__magic_name__ , )
lowercase : Optional[Any] =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __magic_name__ )
def _lowerCAmelCase ( ) -> str:
lowercase : Union[str, Any] =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__magic_name__ , default=__magic_name__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__magic_name__ , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase : List[Any] =parser.parse_args()
lowercase : Dict ={'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__magic_name__ , __magic_name__ )
if __name__ == "__main__":
main()
| 92 |
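A minimal hedged sketch of the `Accelerator.accumulate` pattern the row above demonstrates; `model`, `batch`, and `optimizer` are placeholders, not from the row.

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
# inside the training loop, gradients are synced only every 4th step:
# with accelerator.accumulate(model):
#     loss = model(**batch).loss
#     accelerator.backward(loss)
#     optimizer.step()
#     optimizer.zero_grad()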
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
| 29 | 0 |
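A hedged sketch of the single DDPM denoising step the scheduler tests above exercise; the shapes and noise residual are illustrative.

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
sample = torch.randn(1, 3, 8, 8)
residual = torch.randn_like(sample)  # stands in for a model's predicted noise
prev = scheduler.step(residual, 999, sample, generator=torch.manual_seed(0)).prev_sample
assert prev.shape == sample.shape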
"""simple docstring"""
import math
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 ) ->list:
"""simple docstring"""
lowerCAmelCase__ :List[str] = end or len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :List[Any] = i
lowerCAmelCase__ :Any = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCAmelCase__ :str = array[temp_index - 1]
temp_index -= 1
lowerCAmelCase__ :Tuple = temp_index_value
return array
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->None: # Max Heap
"""simple docstring"""
lowerCAmelCase__ :Any = index
lowerCAmelCase__ :Optional[int] = 2 * index + 1 # Left Node
lowerCAmelCase__ :int = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCAmelCase__ :Dict = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCAmelCase__ :str = right_index
if largest != index:
lowerCAmelCase__ , lowerCAmelCase__ :Dict = array[largest], array[index]
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE ) ->list:
"""simple docstring"""
lowerCAmelCase__ :Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
for i in range(n // 2 , -1 , -1 ):
heapify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for i in range(n - 1 , 0 , -1 ):
lowerCAmelCase__ , lowerCAmelCase__ :int = array[0], array[i]
heapify(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
return array
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = low
lowerCAmelCase__ :int = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = array[j], array[i]
i += 1
def __A (_SCREAMING_SNAKE_CASE ) ->list:
"""simple docstring"""
if len(_SCREAMING_SNAKE_CASE ) == 0:
return array
lowerCAmelCase__ :Dict = 2 * math.ceil(math.loga(len(_SCREAMING_SNAKE_CASE ) ) )
lowerCAmelCase__ :Tuple = 16
return intro_sort(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list:
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_SCREAMING_SNAKE_CASE )
max_depth -= 1
lowerCAmelCase__ :Any = median_of_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 )
lowerCAmelCase__ :Tuple = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
intro_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = p
return insertion_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = input("""Enter numbers separated by a comma : """).strip()
__A = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 93 |
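A standalone sketch of the median-of-three pivot selection the introsort row above uses; the helper name is hypothetical.

def median_of_three(a: list, i: int, j: int, k: int) -> int:
    # same selection as the row's three-way comparison, written via sorted()
    return sorted((a[i], a[j], a[k]))[1]

assert median_of_three([9, 1, 5], 0, 1, 2) == 5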
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = v.to_dict()
return d
| 29 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase_ ( __A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase , lowercase : List[Any] =image.size
lowercase , lowercase : str =(x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
lowercase : Tuple =image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
lowercase : int =np.array(__A ).astype(np.floataa ) / 255.0
lowercase : Union[str, Any] =image[None].transpose(0 , 3 , 1 , 2 )
lowercase : Optional[int] =torch.from_numpy(__A )
return 2.0 * image - 1.0
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : VQModel , UpperCAmelCase : UNetaDModel , UpperCAmelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self : Union[str, Any] , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : Optional[int] = 100 , UpperCAmelCase : Optional[float] = 0.0 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
if isinstance(UpperCAmelCase , PIL.Image.Image ):
lowercase : Tuple =1
elif isinstance(UpperCAmelCase , torch.Tensor ):
lowercase : Optional[Any] =image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase )}' )
if isinstance(UpperCAmelCase , PIL.Image.Image ):
lowercase : List[str] =preprocess(UpperCAmelCase )
lowercase , lowercase : int =image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowercase : Any =(batch_size, self.unet.config.in_channels // 2, height, width)
lowercase : Any =next(self.unet.parameters() ).dtype
lowercase : Union[str, Any] =randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
lowercase : Optional[int] =image.to(device=self.device , dtype=UpperCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(UpperCAmelCase , device=self.device )
lowercase : List[str] =self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowercase : Optional[int] =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase : int ='''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase : Any ={}
if accepts_eta:
lowercase : List[str] =eta
for t in self.progress_bar(UpperCAmelCase ):
# concat latents and low resolution image in the channel dimension.
lowercase : str =torch.cat([latents, image] , dim=1 )
lowercase : Tuple =self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase )
# predict the noise residual
lowercase : List[str] =self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase : int =self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample
# decode the image latents with the VQVAE
lowercase : Any =self.vqvae.decode(UpperCAmelCase ).sample
lowercase : Union[str, Any] =torch.clamp(UpperCAmelCase , -1.0 , 1.0 )
lowercase : List[str] =image / 2 + 0.5
lowercase : Dict =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase : Any =self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase )
| 94 |
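A standalone check of the [0, 1] to [-1, 1] image normalization done in the pipeline row's `preprocess`; the array values are illustrative.

import numpy as np

img = np.array([[[0, 128, 255]]], dtype=np.float32) / 255.0  # pixels scaled to [0, 1]
scaled = 2.0 * img - 1.0                                     # then mapped to [-1, 1]
assert scaled.min() == -1.0 and scaled.max() == 1.0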
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # `torch.tensor` is a factory function, not a type; isinstance needs `torch.Tensor`
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level - 1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
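# --- illustrative usage sketch (not from the original file; dict contents are made up) ---
# Nested dicts become nested `Config` objects, so attribute access works at any depth:
#
#     cfg = Config({"model": {"hidden_size": 768, "layers": 12}, "seed": 42})
#     assert cfg.model.hidden_size == 768
#     assert cfg.seed == 42
#     print(cfg)  # pretty-prints the tree via __str__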
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1_024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
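# --- illustrative example (not from the original file) ---
# The cache filename is fully deterministic: sha256(url), plus ".{sha256(etag)}"
# when an ETag is available, so a changed ETag yields a new cache entry for the
# same URL:
#
#     url_to_filename("https://example.com/model.bin")
#     # -> "<64 hex chars>"
#     url_to_filename("https://example.com/model.bin", etag='"abc123"')
#     # -> "<64 hex chars>.<64 hex chars>"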
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            # note: upstream called `requests.json()`, which is a bug; `.json()`
            # lives on the response object, not the module
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
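# --- minimal sketch (not from the original file) ---
# `chunk` yields successive slices, which is handy for batching image lists:
if __name__ == "__main__":
    demo_items = list(range(7))
    for batch in chunk(demo_items, batch=3):
        print(batch)  # [0, 1, 2], then [3, 4, 5], then [6]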
| 29 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ) -> None:
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."""
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1_000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
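# --- illustrative usage sketch (not from the original file) ---
# Minimal driver for the pipeline above; "facebook/DiT-XL-2-256" is assumed to
# be a compatible checkpoint, and the sampling settings are arbitrary examples.
if __name__ == "__main__":
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    out = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25)
    out.images[0].save("dit_sample.png")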
| 95 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Converts the raw "#"-delimited table text into a pandas DataFrame
            (the first row is treated as the column header)."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
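# --- illustrative invocation (not from the original file; the script name, model
# id, and paths are examples only) ---
#
#   python run_tabfact.py \
#     --model_name_or_path microsoft/tapex-base \
#     --dataset_name tab_fact --dataset_config_name tab_fact \
#     --do_train --do_eval \
#     --output_dir ./tapex-tabfact --overwrite_output_dir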
| 29 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
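# --- illustrative example (not from the original file) ---
# For a label file with one class per line:
#
#     down
#     up
#
# read_txt_into_dict returns {0: "down", 1: "up"} (line number -> first word),
# which is later attached to the config as `id2label` for sequence classification.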
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
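# --- illustrative note (not from the original file) ---
# fairseq names conv weights like "conv_layers.<layer_id>.<type_id>.{weight,bias}":
# type_id 0 is the conv itself and type_id 2 the (group/layer) norm, e.g.
#
#     "conv_layers.0.0.weight" -> feature_extractor.conv_layers[0].conv.weight
#     "conv_layers.0.2.weight" -> feature_extractor.conv_layers[0].layer_norm.weight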
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
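# --- illustrative invocation (not from the original file; the script name and
# all paths are examples only) ---
#
#   python convert_wav2vec2_checkpoint.py \
#     --checkpoint_path ./wav2vec_small_960h.pt \
#     --dict_path ./dict.ltr.txt \
#     --pytorch_dump_folder_path ./wav2vec2-base-960h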
| 96 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
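        # --- illustrative note (not from the original test) ---
        # The score checked above is the summed negative log-likelihood of the
        # target sequence: `loss` is the *mean* per-token cross-entropy, so
        # multiplying by the number of label tokens (and negating) recovers
        # sum(log p(token)), matching what the reference T5 MTF eval reports.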
| 29 | 0 |
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
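# --- illustrative trace (not from the original file) ---
# gnome_sort([3, 1, 2]): 3 > 1, swap -> [1, 3, 2], step back to the start;
# advance past 1 <= 3; 3 > 2, swap -> [1, 2, 3], step back; advance to the end.
# In-place, O(n^2) worst case, O(n) on already-sorted input:
#
#     assert gnome_sort([3, 1, 2]) == [1, 2, 3]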
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 97 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle as a list of rows, one inner list per row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Same output as `generate_pascal_triangle`, but each row is built from the
    previous one and its second half is produced by mirroring the first half."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)

    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
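    # --- illustrative output (not from the original file) ---
    # generate_pascal_triangle(4) and generate_pascal_triangle_optimized(4) both
    # return [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]; print_pascal_triangle(4) renders
    #
    #        1
    #       1 1
    #      1 2 1
    #     1 3 3 1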
| 29 | 0 |
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() -> argparse.Namespace:
    """Parse the CLI flags this image-generation script understands."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows: int, cols: int):
    """Paste `rows * cols` PIL images into a single grid image."""
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
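# For reference, the round trip exercised by these tests boils down to (sketch;
# "kakaobrain/align-base" is the public ALIGN checkpoint, not used by the tests above):
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   batch = processor(text="a photo of a cat", images=image, return_tensors="pt")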
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
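# The registration pattern exercised above, in a nutshell (sketch; MyConfig and
# MyFeatureExtractor stand in for hypothetical user-defined classes):
#   AutoConfig.register("my-model", MyConfig)
#   AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#   fe = AutoFeatureExtractor.from_pretrained("path/to/dir/with/my-model-config")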
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
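# Example of the switches above: "focalnet-base-lrf" yields embed_dim=128,
# depths=[2, 2, 18, 2], focal_levels=[3, 3, 3, 3], focal_windows=[3, 3, 3, 3],
# the ImageNet-1k label mapping, and no conv embedding, post-layernorm or layerscale.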
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
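# Example of what rename_key does to a checkpoint key:
#   "layers.0.blocks.1.modulation.f.weight"
# becomes
#   "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"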
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
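# Quick illustration (comment only): the quantity computed above is the Shannon
# entropy H = -sum(p * log2(p)). For the text "aab", analyze_text returns
#   ({'b': 1, 'a': 2}, {' a': 1, 'aa': 1, 'ab': 1})
# i.e. the final character plus each earlier character and bigram counted once.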
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
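# The three id streams exercised above, with the toy vocab from setUp (where a
# token's shape and pronunciation ids equal its vocab id):
#   tokens = tokenizer.tokenize("你好")                   # ["你", "好"]
#   tokenizer.convert_tokens_to_ids(tokens)                # [5, 6]
#   tokenizer.convert_tokens_to_shape_ids(tokens)          # [5, 6]
#   tokenizer.convert_tokens_to_pronunciation_ids(tokens)  # [5, 6]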
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than attention heads, so this
        # overrides the common test with the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
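    # For orientation: in the standard cache format checked above, each layer's key
    # and value tensors are shaped (batch_size, num_kv_heads, seq_length, head_dim);
    # multi-query attention collapses num_kv_heads to 1, while the "new decoder
    # architecture" uses as many KV heads as query heads.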
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one
are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
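
# Worked note (added for illustration): in `evaluate`, only muc, bcub and ceafe
# feed the CoNLL average. If all three F1 values were 1.0, `conll` accumulates to
# 3.0 over conll_subparts_num == 3, so conll_score = (3.0 / 3) * 100 = 100.0,
# matching the docstring example output above.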
| 29 | 0 |
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
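    # Additional illustrative checks (added): the pattern accepts the prefixes
    # 0, 94, +94 and 0094, a valid operator digit after the leading 7, then 7 digits.
    for number in ("+94773283048", "0094702343221", "0957651234"):
        print(number, is_sri_lankan_phone_number(number))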
| 102 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , config_name=UpperCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = generation_config.update(**UpperCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCAmelCase , {'''foo''': '''bar'''} )
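        # (Added illustration) e.g. GenerationConfig().update(max_new_tokens=1024, foo="bar")
        # sets max_new_tokens on the config in place and returns {"foo": "bar"} as unused kwargs.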
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
lowerCamelCase_ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ = GenerationConfig.from_model_config(UpperCAmelCase )
assert not hasattr(UpperCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = GenerationConfig.from_pretrained(UpperCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
lowerCamelCase_ = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
| 29 | 0 |
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav, max_length, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
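
# Example (added for illustration): a 10 s clip at 16 kHz (160_000 samples) cut to
# at most 2 s keeps a random contiguous window of 32_000 samples:
#     random_subsample(np.zeros(160_000), max_length=2.0).shape == (32_000,)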
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )
    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
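
# (Added note) Flow sketch: raw audio -> random_subsample (train only) ->
# AutoFeatureExtractor -> AutoModelForAudioClassification logits -> argmax -> accuracy.
# Hypothetical invocation (dataset/config names are illustrative):
#   python run_audio_classification.py --dataset_name superb --dataset_config_name ks \
#       --output_dir ./out --do_train --do_eval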
if __name__ == "__main__":
main()
| 103 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
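
# Usage sketch (added; mirrors the datasets documentation example):
#   feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#   -> {"language": ("de", "en", "fr", "fr"),
#       "translation": ("die katze", "the cat", "la chatte", "le chat")}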
| 29 | 0 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= 10)
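

# Sanity sketch (added, not part of the original solution): a brute force over
# (outer_width, hole_width) pairs that can cross-check small limits, e.g.
# _brute_force(1000) == solution(1000).
def _brute_force(t_limit: int) -> int:
    tiles = defaultdict(int)
    for outer in range(3, (t_limit // 4) + 2):
        # hole widths share the outer width's parity; iterate from largest hole
        # (fewest tiles) down, stopping once the tile count exceeds the limit
        for hole in range(outer - 2, 0, -2):
            t = outer * outer - hole * hole
            if t > t_limit:
                break
            tiles[t] += 1
    return sum(1 for n in tiles.values() if 1 <= n <= 10)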
if __name__ == "__main__":
print(f'{solution() = }')
| 104 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
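
# (Added note) With this pattern, `from transformers.models.gpt_neo import GPTNeoModel`
# stays cheap at import time: _LazyModule only resolves the names listed in
# _import_structure (and thus pulls in torch/flax) on first attribute access.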
| 29 | 0 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
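

# Usage sketch (added, illustrative sizes): div_val=1 with d_proj == d_embed keeps
# full-size tail clusters and skips the projections (out_projs entries are None).
# Targets are kept in the head cluster purely to keep the sketch simple.
if __name__ == "__main__":
    softmax = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=64, d_proj=64, cutoffs=[1000, 5000])
    hidden = torch.randn(2, 8, 64)           # (batch, seq, d_proj)
    labels = torch.randint(0, 1000, (2, 8))  # next-token targets in the shortlist
    nll = softmax(hidden, labels)            # per-token negative log-likelihoods
    print(nll.shape)                         # torch.Size([14]) after the internal shift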
| 105 |
"""simple docstring"""
import math
def prime_sieve(n):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit=999_966_663_333):
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
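

# Brute-force sketch (added; assumes the Project Euler 234 definitions): a number
# n is semidivisible when exactly one of lps(n) (largest prime <= sqrt(n)) and
# ups(n) (smallest prime >= sqrt(n)) divides it. Usable to cross-check small
# limits, e.g. _brute_force(1000) == solution(1000).
def _brute_force(limit):
    primes = prime_sieve(math.floor(math.sqrt(limit)) + 100)
    total = 0
    for n in range(4, limit + 1):
        root = math.sqrt(n)
        lps = max(p for p in primes if p <= root)
        ups = min(p for p in primes if p >= root)
        if (n % lps == 0) != (n % ups == 0):  # exactly one of the two divides n
            total += n
    return total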
if __name__ == "__main__":
print(solution())
| 29 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 106 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
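

# (Added illustration) abs(2_351_563 - 2_355_000) / 2_355_000 ≈ 0.0015, so byte
# counts like the expected split sizes below are treated as close (1% threshold).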
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, '''README.md''')
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos['''default'''], key), getattr(expected_dataset_infos['''default'''], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_data2vec_audio''': ['''DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Data2VecAudioConfig'''],
'''configuration_data2vec_text''': [
'''DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecTextConfig''',
'''Data2VecTextOnnxConfig''',
],
'''configuration_data2vec_vision''': [
'''DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Data2VecVisionConfig''',
'''Data2VecVisionOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'''DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecAudioForAudioFrameClassification''',
'''Data2VecAudioForCTC''',
'''Data2VecAudioForSequenceClassification''',
'''Data2VecAudioForXVector''',
'''Data2VecAudioModel''',
'''Data2VecAudioPreTrainedModel''',
]
    _import_structure["modeling_data2vec_text"] = [
'''DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecTextForCausalLM''',
'''Data2VecTextForMaskedLM''',
'''Data2VecTextForMultipleChoice''',
'''Data2VecTextForQuestionAnswering''',
'''Data2VecTextForSequenceClassification''',
'''Data2VecTextForTokenClassification''',
'''Data2VecTextModel''',
'''Data2VecTextPreTrainedModel''',
]
    _import_structure["modeling_data2vec_vision"] = [
'''DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Data2VecVisionForImageClassification''',
'''Data2VecVisionForMaskedImageModeling''',
'''Data2VecVisionForSemanticSegmentation''',
'''Data2VecVisionModel''',
'''Data2VecVisionPreTrainedModel''',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'''TFData2VecVisionForImageClassification''',
'''TFData2VecVisionForSemanticSegmentation''',
'''TFData2VecVisionModel''',
'''TFData2VecVisionPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 107 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. The relationship has also been examined through the so-called power law, which describes the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
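# Hedged usage sketch (illustration only, not part of the metric above): the WER
# identity from the docstring, WER = (S + D + I) / (S + D + C), evaluated with
# jiwer.compute_measures on a made-up reference/prediction pair.
if __name__ == "__main__":
    measures = compute_measures("this is the reference", "this is the prediction")
    S, D, I = measures["substitutions"], measures["deletions"], measures["insertions"]
    C = measures["hits"]
    # one substituted word out of four reference words -> 0.25
    print((S + D + I) / (S + D + C))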
| 29 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = '''sshleifer/bart-tiny-random'''
TINY_T5 = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class MakeStudentTester(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def teacher_config(self) -> AutoConfig:
        return AutoConfig.from_pretrained(TINY_BART)
    def test_valid_t5(self) -> None:
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)
    def test_asymmetric_t5(self) -> None:
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)
    def test_same_decoder_small_encoder(self) -> None:
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)
    def test_small_enc_small_dec(self) -> None:
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)
    def test_raises_assert(self) -> None:
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None) | 108 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
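# Hedged usage sketch (illustration only): pairing an image with a question
# through the processor. The checkpoint id and COCO image URL are example
# values; running this requires network access and a standalone transformers
# install (the relative imports above assume in-package use).
if __name__ == "__main__":
    import requests
    from PIL import Image
    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    encoding = processor(image, "How many cats are there?", return_tensors="pt")
    # input_ids/attention_mask come from the tokenizer; pixel_values/pixel_mask from the image processor
    print(sorted(encoding.keys()))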
| 109 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
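# Hedged reference sketch (added for illustration): the nucleus (top-p) rule the
# test above exercises, written in plain NumPy. This mirrors my reading of the
# warper's default behaviour (min_tokens_to_keep handling omitted): keep tokens,
# highest probability first, until the cumulative mass crosses top_p.
def _top_p_reference(probs, top_p):
    order = np.argsort(probs)[::-1]  # highest probability first
    cumulative = np.cumsum(probs[order])
    before = np.concatenate(([0.0], cumulative[:-1]))  # mass strictly before each token
    keep_sorted = before < top_p  # the token that crosses top_p is still kept
    mask = np.zeros_like(probs, dtype=bool)
    mask[order[keep_sorted]] = True
    return np.where(mask, probs, 0.0)
# _top_p_reference(np.array([0.3, 0.1, 0.1, 0.5]), 0.8) -> [0.3, 0.0, 0.0, 0.5],
# matching the filtered distribution asserted in the top-p test above.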
| 29 | 0 |
"""simple docstring"""
def solution(n=100):
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f'{solution() = }')
| 110 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
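# Hedged usage sketch (illustration only): generating a small synthetic dataset
# with the helpers above. The feature layout and temporary path are made-up
# example values.
if __name__ == "__main__":
    import os
    import tempfile
    features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
    with tempfile.TemporaryDirectory() as tmp_dir:
        dataset = generate_example_dataset(os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=10)
        print(dataset)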
| 29 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/accelerate" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 453 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
) | 518 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path='''facebook/mbart-large-en-ro''', finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
args = parser.parse_args()
model = convert_fairseq_mbart_checkpoint_from_disk(
    args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
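# Hedged usage note (illustration only): the script is meant to be run from the
# command line; the script name, checkpoint path, and output directory below
# are placeholders, not real files.
#
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned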
| 29 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, field=None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class A :
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] = None , _UpperCamelCase : Tuple = None , **_UpperCamelCase : List[str] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
_lowercase: Optional[int] = dataset
_lowercase: List[str] = path_or_buf
_lowercase: List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_lowercase: List[str] = num_proc
_lowercase: Union[str, Any] = "utf-8"
_lowercase: Union[str, Any] = to_json_kwargs
def UpperCAmelCase__ ( self : int):
_lowercase: int = self.to_json_kwargs.pop("path_or_buf" , _UpperCamelCase)
_lowercase: Optional[Any] = self.to_json_kwargs.pop("orient" , "records")
_lowercase: Union[str, Any] = self.to_json_kwargs.pop("lines" , True if orient == "records" else False)
_lowercase: List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True)
_lowercase: str = self.to_json_kwargs.pop("compression" , _UpperCamelCase)
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf , "wb" , compression=_UpperCamelCase) as buffer:
_lowercase: str = self._write(file_obj=_UpperCamelCase , orient=_UpperCamelCase , lines=_UpperCamelCase , index=_UpperCamelCase , **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
" was passed. Please provide a local path instead.")
_lowercase: List[Any] = self._write(
file_obj=self.path_or_buf , orient=_UpperCamelCase , lines=_UpperCamelCase , index=_UpperCamelCase , **self.to_json_kwargs)
return written
def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : Tuple):
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase: List[Any] = args
_lowercase: int = query_table(
table=self.dataset.data , key=slice(_UpperCamelCase , offset + self.batch_size) , indices=self.dataset._indices , )
_lowercase: Union[str, Any] = batch.to_pandas().to_json(
path_or_buf=_UpperCamelCase , orient=_UpperCamelCase , lines=_UpperCamelCase , index=_UpperCamelCase , **_UpperCamelCase)
if not json_str.endswith("\n"):
json_str += "\n"
return json_str.encode(self.encoding)
def UpperCAmelCase__ ( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : Any , **_UpperCamelCase : Optional[Any] , ):
_lowercase: Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset) , self.batch_size) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
_lowercase: str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
written += file_obj.write(_UpperCamelCase)
else:
_lowercase , _lowercase: str = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _UpperCamelCase , _UpperCamelCase)] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(_UpperCamelCase)
return written
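# Hedged usage sketch (illustration only): the reader/writer machinery above
# backs the public Dataset.from_json / Dataset.to_json API; this round trip
# shows the JSON Lines layout they default to. The file name is a placeholder,
# and the snippet assumes a standalone `datasets` install rather than
# in-package use.
if __name__ == "__main__":
    from datasets import Dataset
    ds = Dataset.from_dict({"a": [1, 2, 3]})
    ds.to_json("tmp.json", lines=True)  # one JSON record per line
    print(Dataset.from_json("tmp.json")[0])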
| 226 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTop2Router,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
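# Note (added for illustration): under TYPE_CHECKING the symbols above are
# imported only for static analysis; at runtime sys.modules[__name__] is
# replaced by _LazyModule, so an attribute access such as
# `NllbMoeForConditionalGeneration` triggers the heavy torch-dependent import
# lazily, on first use.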
| 29 | 0 |
def average_absolute_deviation(nums: list) -> float:
    """
    Return the mean absolute deviation of a list of numbers.

    >>> average_absolute_deviation([0, 0, 0, 0])
    0.0
    >>> average_absolute_deviation([4, 8, 10, 14])
    3.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('''List is empty''' )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
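# Hedged illustration (added): where the variance constants asserted above come
# from, re-derived with NumPy for the linear schedule in get_scheduler_config
# (beta_start=1e-4, beta_end=0.02, 1000 steps). This assumes the scheduler uses
# the standard DDPM posterior variance beta_t * (1 - abar_{t-1}) / (1 - abar_t).
def _ddpm_variance_reference(t):
    import numpy as np
    betas = np.linspace(0.0001, 0.02, 1000)
    alphas_cumprod = np.cumprod(1.0 - betas)
    prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return betas[t] * (1.0 - prev) / (1.0 - alphas_cumprod[t])
# _ddpm_variance_reference(0) -> 0.0, (487) -> ~0.00979, (999) -> ~0.02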
| 29 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )
    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
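# Hedged usage sketch (illustration only): enabling generation inside the eval
# loop. output_dir and the generation settings are example values, and the
# snippet assumes a standalone transformers install (this module normally runs
# in-package).
if __name__ == "__main__":
    args = Seq2SeqTrainingArguments(
        output_dir="./out",
        predict_with_generate=True,  # run model.generate() during evaluation
        generation_max_length=128,
        generation_num_beams=4,
    )
    print(args.to_dict()["generation_max_length"])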
| 29 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = (DDPMScheduler,)
def lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Optional[int] ):
'''simple docstring'''
_A = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**__UpperCAmelCase )
return config
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__UpperCAmelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCAmelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
self.check_over_configs(thresholding=__UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**__UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**__UpperCAmelCase )
_A = len(__UpperCAmelCase )
_A = self.dummy_model()
_A = self.dummy_sample_deter
_A = torch.manual_seed(0 )
for t in reversed(range(__UpperCAmelCase ) ):
# 1. predict noise residual
_A = model(__UpperCAmelCase , __UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
_A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_A = pred_prev_sample
_A = torch.sum(torch.abs(__UpperCAmelCase ) )
_A = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config(prediction_type="v_prediction" )
_A = scheduler_class(**__UpperCAmelCase )
_A = len(__UpperCAmelCase )
_A = self.dummy_model()
_A = self.dummy_sample_deter
_A = torch.manual_seed(0 )
for t in reversed(range(__UpperCAmelCase ) ):
# 1. predict noise residual
_A = model(__UpperCAmelCase , __UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
_A = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_A = pred_prev_sample
_A = torch.sum(torch.abs(__UpperCAmelCase ) )
_A = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**__UpperCAmelCase )
_A = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCAmelCase )
_A = scheduler.timesteps
for i, timestep in enumerate(__UpperCAmelCase ):
if i == len(__UpperCAmelCase ) - 1:
_A = -1
else:
_A = timesteps[i + 1]
_A = scheduler.previous_timestep(__UpperCAmelCase )
_A = prev_t.item()
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**__UpperCAmelCase )
_A = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__UpperCAmelCase )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**__UpperCAmelCase )
_A = [100, 87, 50, 1, 0]
_A = len(__UpperCAmelCase )
with self.assertRaises(__UpperCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__UpperCAmelCase , timesteps=__UpperCAmelCase )
    def test_custom_timesteps_too_large(self):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 330 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
    )
default_cache_path = os.path.join(torch_cache_home, """transformers""")
CLOUDFRONT_DISTRIB_PREFIX = """https://cdn.huggingface.co"""
S3_BUCKET_PREFIX = """https://s3.amazonaws.com/models.huggingface.co/bert"""
PATH = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
CONFIG = os.path.join(PATH, """config.yaml""")
ATTRIBUTES = os.path.join(PATH, """attributes.txt""")
OBJECTS = os.path.join(PATH, """objects.txt""")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = """pytorch_model.bin"""
CONFIG_NAME = """config.yaml"""
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(''',''' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(''',''' )[0].lower().strip() )
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, '''rb''' ) as f:
        ckp = pkl.load(f)['''model''']
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def compare(in_tensor):
    out_tensor = torch.load('''dump.pt''', map_location=in_tensor.device )
    na = in_tensor.numpy()
    nb = out_tensor.numpy()[0]
    print(na.shape, na[0, 0, :5] )
    print(nb.shape, nb[0, 0, :5] )
    assert np.allclose(na, nb, rtol=0.01, atol=0.1 ), (
        f"{sum([1 for x in np.isclose(na, nb, rtol=0.01, atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception('''tensors are all good''' )
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '''/''' not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
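# Minimal sketch of the resume handshake used by the download helper above: a partial
# file of N bytes becomes a "Range: bytes=N-" request header, and an HTTP 416 reply
# (Range not satisfiable) means the server has nothing left to send.
def _demo_range_headers(user_agent ,resume_size ):
    headers = {'''user-agent''': user_agent}
    if resume_size > 0:
        headers['''Range'''] = '''bytes=%d-''' % (resume_size,)
    return headers
assert _demo_range_headers('''demo''' ,1_024 )['''Range'''] == '''bytes=1024-'''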
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                '''%s not found in cache or force_download set to True, downloading to %s'''
                % (lowerCAmelCase__ ,temp_file.name ) ) # interpolate explicitly: print() does not apply %-style formatting to extra arguments
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
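# Standalone sketch of the cache-filename scheme above, assuming the mangled
# ``shaaaa`` stands for hashlib.sha256: filename = sha256(url)[.sha256(etag)][.h5].
from hashlib import sha256
def _demo_url_to_filename(url ,etag=None ):
    name = sha256(url.encode('''utf-8''' ) ).hexdigest()
    if etag:
        name += '''.''' + sha256(etag.encode('''utf-8''' ) ).hexdigest()
    if url.endswith('''.h5''' ):
        name += '''.h5'''
    return name
assert '''.''' in _demo_url_to_filename('''https://example.com/model.bin''' ,etag='''"abc"''' )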
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
            lowerCamelCase_ = req.json() # bug fix: the requests module itself has no json(); parse the response object fetched above
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
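# Small sketch of the Detectron-style rename above (key names are illustrative, and
# torch is assumed to be imported at the top of this utility module): every BatchNorm
# ``running_var`` entry gains a zero-initialized ``num_batches_tracked`` companion,
# mirroring PyTorch's BatchNorm state dict layout.
_demo_ckpt = {'''backbone.bn1.running_var''': torch.ones(4 )}
_demo_converted = {}
for _k, _v in _demo_ckpt.items():
    _demo_converted[_k] = _v
    if '''running_var''' in _k:
        _demo_converted[_k.replace('''running_var''' ,'''num_batches_tracked''' )] = torch.tensor([0] )
assert '''backbone.bn1.num_batches_tracked''' in _demo_converted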
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
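# Tiny usage sketch of the batching generator above: it yields successive slices of
# length ``batch``, with a shorter final chunk when the total does not divide evenly.
_demo_images = [0, 1, 2, 3, 4]
assert [c for c in (_demo_images[i : i + 2] for i in range(0 ,len(_demo_images ) ,2 ))] == [[0, 1], [2, 3], [4]]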
| 29 | 0 |
import argparse
_lowercase : int = "docs/source/_static/js/custom.js"
def _lowerCAmelCase ( UpperCamelCase__: int ) -> Any:
"""simple docstring"""
with open(lowerCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
A = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
A = f'const stableVersion = \"v{version}\"\n'
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += f' \"v{version}\": \"v{version}\",\n'
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowerCAmelCase__ )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : List[Any] = parser.parse_args()
update_custom_js(args.version)
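# Hedged sketch of the edit performed above on an illustrative custom.js buffer
# (contents are made up): the stable-version line is rewritten in place, then a new
# mapping entry is appended just before the closing brace of versionMapping.
_demo_lines = [
    'const stableVersion = "v4.16.0"\n',
    "const versionMapping = {\n",
    '    "v4.16.0": "v4.16.0",\n',
    "}\n",
]
_demo_index = 0
while not _demo_lines[_demo_index].startswith("const stableVersion ="):
    _demo_index += 1
_demo_lines[_demo_index] = 'const stableVersion = "v4.17.0"\n'
while not _demo_lines[_demo_index].startswith("const versionMapping = {"):
    _demo_index += 1
while not _demo_lines[_demo_index].startswith("}"):
    _demo_index += 1
_demo_lines[_demo_index - 1] += '    "v4.17.0": "v4.17.0",\n'
assert '"v4.17.0": "v4.17.0"' in _demo_lines[2]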
| 641 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
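# Hedged standalone sketch of the '#'-delimited table format handled inside
# ``preprocess_tabfact_function`` above: rows split on newlines, cells on '#',
# and the first row becomes the pandas header.
_demo_table_text = '''col_a#col_b\n1#x\n2#y\n'''
_demo_rows = [_row.split('''#''' ) for _row in _demo_table_text.strip('''\n''' ).split('''\n''' )]
_demo_df = pd.DataFrame.from_records(_demo_rows[1:] ,columns=_demo_rows[0] )
assert list(_demo_df.columns ) == ['''col_a''', '''col_b'''] and len(_demo_df ) == 2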
| 29 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[str] , _A : int , _A : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = jnp.ones((batch_size, length)) / length
return scores
def _lowerCAmelCase ( self : str):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : str = 2_0
_SCREAMING_SNAKE_CASE : Dict = self._get_uniform_logits(batch_size=2 , length=_A)
# tweak scores to not be uniform anymore
_SCREAMING_SNAKE_CASE : Tuple = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
_SCREAMING_SNAKE_CASE : int = scores.at[1, 1_0].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
_SCREAMING_SNAKE_CASE : Optional[Any] = jax.nn.softmax(_A , axis=-1)
_SCREAMING_SNAKE_CASE : str = FlaxTemperatureLogitsWarper(temperature=0.5)
_SCREAMING_SNAKE_CASE : Tuple = FlaxTemperatureLogitsWarper(temperature=1.3)
_SCREAMING_SNAKE_CASE : List[Any] = jax.nn.softmax(temp_dist_warper_sharper(_A , scores.copy() , cur_len=_A) , axis=-1)
_SCREAMING_SNAKE_CASE : Any = jax.nn.softmax(temp_dist_warper_smoother(_A , scores.copy() , cur_len=_A) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = None
_SCREAMING_SNAKE_CASE : List[Any] = 1_0
_SCREAMING_SNAKE_CASE : Dict = 2
# create ramp distribution
_SCREAMING_SNAKE_CASE : Any = np.broadcast_to(np.arange(_A)[None, :] , (batch_size, vocab_size)).copy()
_SCREAMING_SNAKE_CASE : List[str] = ramp_logits[1:, : vocab_size // 2] + vocab_size
_SCREAMING_SNAKE_CASE : Optional[int] = FlaxTopKLogitsWarper(3)
_SCREAMING_SNAKE_CASE : Tuple = top_k_warp(_A , _A , cur_len=_A)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
_SCREAMING_SNAKE_CASE : Union[str, Any] = 5
_SCREAMING_SNAKE_CASE : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
_SCREAMING_SNAKE_CASE : str = np.broadcast_to(np.arange(_A)[None, :] , (batch_size, length)).copy()
_SCREAMING_SNAKE_CASE : Tuple = top_k_warp_safety_check(_A , _A , cur_len=_A)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = None
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1_0
_SCREAMING_SNAKE_CASE : int = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_SCREAMING_SNAKE_CASE : str = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
_SCREAMING_SNAKE_CASE : List[str] = FlaxTopPLogitsWarper(0.8)
_SCREAMING_SNAKE_CASE : Union[str, Any] = np.exp(top_p_warp(_A , _A , cur_len=_A))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_SCREAMING_SNAKE_CASE : List[str] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(_A , _A , atol=1e-3))
# check edge cases with negative and extreme logits
_SCREAMING_SNAKE_CASE : List[Any] = np.broadcast_to(np.arange(_A)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_SCREAMING_SNAKE_CASE : Dict = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
_SCREAMING_SNAKE_CASE : List[Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
_SCREAMING_SNAKE_CASE : Union[str, Any] = top_p_warp(_A , _A , cur_len=_A)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = 2_0
_SCREAMING_SNAKE_CASE : str = 4
_SCREAMING_SNAKE_CASE : Dict = 0
_SCREAMING_SNAKE_CASE : List[str] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_A)
# check that min length is applied at length 5
_SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((batch_size, 2_0) , vocab_size=2_0)
_SCREAMING_SNAKE_CASE : Tuple = 5
_SCREAMING_SNAKE_CASE : str = self._get_uniform_logits(_A , _A)
_SCREAMING_SNAKE_CASE : List[str] = min_dist_processor(_A , _A , cur_len=_A)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""")])
# check that min length is not applied anymore at length 15
_SCREAMING_SNAKE_CASE : Optional[int] = self._get_uniform_logits(_A , _A)
_SCREAMING_SNAKE_CASE : List[Any] = 1_5
_SCREAMING_SNAKE_CASE : int = min_dist_processor(_A , _A , cur_len=_A)
self.assertFalse(jnp.isinf(_A).any())
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = 2_0
_SCREAMING_SNAKE_CASE : Union[str, Any] = 4
_SCREAMING_SNAKE_CASE : Optional[int] = 0
_SCREAMING_SNAKE_CASE : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_A)
# check that all scores are -inf except the bos_token_id score
_SCREAMING_SNAKE_CASE : Dict = ids_tensor((batch_size, 1) , vocab_size=2_0)
_SCREAMING_SNAKE_CASE : Dict = 1
_SCREAMING_SNAKE_CASE : str = self._get_uniform_logits(_A , _A)
_SCREAMING_SNAKE_CASE : Optional[Any] = logits_processor(_A , _A , cur_len=_A)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
_SCREAMING_SNAKE_CASE : List[Any] = 3
_SCREAMING_SNAKE_CASE : List[str] = self._get_uniform_logits(_A , _A)
_SCREAMING_SNAKE_CASE : Optional[int] = logits_processor(_A , _A , cur_len=_A)
self.assertFalse(jnp.isinf(_A).any())
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = 2_0
_SCREAMING_SNAKE_CASE : Tuple = 4
_SCREAMING_SNAKE_CASE : Dict = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = 5
_SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_A , eos_token_id=_A)
# check that all scores are -inf except the eos_token_id when max_length is reached
_SCREAMING_SNAKE_CASE : Any = ids_tensor((batch_size, 4) , vocab_size=2_0)
_SCREAMING_SNAKE_CASE : Optional[Any] = 4
_SCREAMING_SNAKE_CASE : Tuple = self._get_uniform_logits(_A , _A)
_SCREAMING_SNAKE_CASE : Any = logits_processor(_A , _A , cur_len=_A)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_SCREAMING_SNAKE_CASE : Union[str, Any] = 3
_SCREAMING_SNAKE_CASE : int = self._get_uniform_logits(_A , _A)
_SCREAMING_SNAKE_CASE : int = logits_processor(_A , _A , cur_len=_A)
self.assertFalse(jnp.isinf(_A).any())
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = 4
_SCREAMING_SNAKE_CASE : Tuple = 1_0
_SCREAMING_SNAKE_CASE : str = 1_5
_SCREAMING_SNAKE_CASE : Any = 2
_SCREAMING_SNAKE_CASE : Optional[Any] = 1
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_5
# dummy input_ids and scores
_SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((batch_size, sequence_length) , _A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids.copy()
_SCREAMING_SNAKE_CASE : List[str] = self._get_uniform_logits(_A , _A)
_SCREAMING_SNAKE_CASE : Optional[Any] = scores.copy()
# instantiate all dist processors
_SCREAMING_SNAKE_CASE : Dict = FlaxTemperatureLogitsWarper(temperature=0.5)
_SCREAMING_SNAKE_CASE : Tuple = FlaxTopKLogitsWarper(3)
_SCREAMING_SNAKE_CASE : str = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
_SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_A)
_SCREAMING_SNAKE_CASE : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=_A , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1_0
# no processor list
_SCREAMING_SNAKE_CASE : str = temp_dist_warp(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : str = top_k_warp(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = top_p_warp(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : List[Any] = min_dist_proc(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : Optional[int] = bos_dist_proc(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : Optional[int] = eos_dist_proc(_A , _A , cur_len=_A)
# with processor list
_SCREAMING_SNAKE_CASE : List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
_SCREAMING_SNAKE_CASE : List[str] = processor(_A , _A , cur_len=_A)
# scores should be equal
self.assertTrue(jnp.allclose(_A , _A , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _lowerCAmelCase ( self : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = 4
_SCREAMING_SNAKE_CASE : Any = 1_0
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1_5
_SCREAMING_SNAKE_CASE : Tuple = 2
_SCREAMING_SNAKE_CASE : Any = 1
_SCREAMING_SNAKE_CASE : Optional[Any] = 1_5
# dummy input_ids and scores
_SCREAMING_SNAKE_CASE : Tuple = ids_tensor((batch_size, sequence_length) , _A)
_SCREAMING_SNAKE_CASE : str = input_ids.copy()
_SCREAMING_SNAKE_CASE : int = self._get_uniform_logits(_A , _A)
_SCREAMING_SNAKE_CASE : str = scores.copy()
# instantiate all dist processors
_SCREAMING_SNAKE_CASE : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5)
_SCREAMING_SNAKE_CASE : Any = FlaxTopKLogitsWarper(3)
_SCREAMING_SNAKE_CASE : Optional[Any] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
_SCREAMING_SNAKE_CASE : Any = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_A)
_SCREAMING_SNAKE_CASE : int = FlaxForcedEOSTokenLogitsProcessor(max_length=_A , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : Dict = 1_0
# no processor list
def run_no_processor_list(_A : Union[str, Any] , _A : Any , _A : Tuple):
_SCREAMING_SNAKE_CASE : str = temp_dist_warp(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : Dict = top_k_warp(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = top_p_warp(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = min_dist_proc(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : List[Any] = bos_dist_proc(_A , _A , cur_len=_A)
_SCREAMING_SNAKE_CASE : Any = eos_dist_proc(_A , _A , cur_len=_A)
return scores
# with processor list
def run_processor_list(_A : Tuple , _A : Union[str, Any] , _A : List[str]):
_SCREAMING_SNAKE_CASE : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
_SCREAMING_SNAKE_CASE : Union[str, Any] = processor(_A , _A , cur_len=_A)
return scores
_SCREAMING_SNAKE_CASE : Union[str, Any] = jax.jit(_A)
_SCREAMING_SNAKE_CASE : Any = jax.jit(_A)
_SCREAMING_SNAKE_CASE : Any = jitted_run_no_processor_list(_A , _A , _A)
_SCREAMING_SNAKE_CASE : Any = jitted_run_processor_list(_A , _A , _A)
# scores should be equal
self.assertTrue(jnp.allclose(_A , _A , atol=1e-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
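# Standalone numpy sketch of the temperature warp exercised above: dividing
# logits by T < 1 sharpens the softmax, while T > 1 flattens it toward uniform.
_demo_logits = np.array([2.0, 1.0, 0.0])
_demo_sharp = np.exp(_demo_logits / 0.5)
_demo_sharp = _demo_sharp / _demo_sharp.sum()
_demo_smooth = np.exp(_demo_logits / 1.3)
_demo_smooth = _demo_smooth / _demo_smooth.sum()
assert _demo_sharp.max() > _demo_smooth.max()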
| 338 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
lowerCamelCase_ = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
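# Hedged sketch of the score computed above (only callable when optax/flax are
# installed, as the guarded imports require): the mean per-token cross entropy
# times the label length recovers a summed log-likelihood; with uniform logits
# over V tokens it equals -L * ln(V).
def _demo_mtf_score():
    import jax.numpy as jnp
    logits = jnp.zeros((1, 3, 5) ) # batch=1, seq=3, vocab=5
    labels = jnp.array([[1, 2, 3]] )
    loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
    return -(labels.shape[-1] * loss.item() ) # ~= -3 * ln(5) ~= -4.828 for uniform logits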
| 29 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
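# Worked sketch of one optimized step above: pad the previous row with zeros,
# sum adjacent pairs to build the distinct first half, then mirror it.
def _demo_next_row(previous_row ):
    padded = [0] + previous_row + [0]
    row_len = len(previous_row ) + 1
    distinct_elements = sum(divmod(row_len ,2 ) )
    first_half = [padded[i - 1] + padded[i] for i in range(1 ,distinct_elements + 1 )]
    second_half = first_half[: row_len // 2][::-1]
    return first_half + second_half
assert _demo_next_row([1, 3, 3, 1] ) == [1, 4, 6, 4, 1]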
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Optional[int] = StableDiffusionDiffEditPipeline
lowercase_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowercase_ : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowercase_ : str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase_ : Dict = frozenset([] )
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_lowercase : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCamelCase, )
_lowercase : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='scaled_linear', clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
_lowercase : Optional[int] = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='scaled_linear', clip_sample=lowerCamelCase, set_alpha_to_zero=lowerCamelCase, )
torch.manual_seed(0)
_lowercase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=1_28, )
torch.manual_seed(0)
_lowercase : Dict = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, hidden_act='gelu', projection_dim=5_12, )
_lowercase : Union[str, Any] = CLIPTextModel(lowerCamelCase)
_lowercase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_lowercase : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Optional[Any] = floats_tensor((1, 16, 16), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Optional[int] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
if str(lowerCamelCase).startswith('mps'):
_lowercase : List[str] = torch.manual_seed(lowerCamelCase)
else:
_lowercase : Optional[int] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Optional[Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Any:
"""simple docstring"""
_lowercase : Dict = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Any = image.cpu().permute(0, 2, 3, 1)[0]
_lowercase : str = Image.fromarray(np.uinta(lowerCamelCase)).convert('RGB')
if str(lowerCamelCase).startswith('mps'):
_lowercase : List[str] = torch.manual_seed(lowerCamelCase)
else:
_lowercase : Union[str, Any] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : Any = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> int:
"""simple docstring"""
_lowercase : int = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCamelCase)).to(lowerCamelCase)
_lowercase : Optional[int] = image.cpu().permute(0, 2, 3, 1)[0]
_lowercase : Optional[int] = Image.fromarray(np.uinta(lowerCamelCase)).convert('RGB')
if str(lowerCamelCase).startswith('mps'):
_lowercase : Tuple = torch.manual_seed(lowerCamelCase)
else:
_lowercase : int = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase)
_lowercase : List[Any] = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self) -> str:
"""simple docstring"""
if not hasattr(self.pipeline_class, '_optional_components'):
return
_lowercase : Optional[Any] = self.get_dummy_components()
_lowercase : str = self.pipeline_class(**lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(lowerCamelCase, lowerCamelCase, lowerCamelCase)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
_lowercase : Dict = self.get_dummy_inputs(lowerCamelCase)
_lowercase : str = pipe(**lowerCamelCase)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCamelCase)
_lowercase : Dict = self.pipeline_class.from_pretrained(lowerCamelCase)
pipe_loaded.to(lowerCamelCase)
pipe_loaded.set_progress_bar_config(disable=lowerCamelCase)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCamelCase, lowerCamelCase) is None, F'''`{optional_component}` did not stay set to None after loading.''', )
_lowercase : Any = self.get_dummy_inputs(lowerCamelCase)
_lowercase : List[str] = pipe_loaded(**lowerCamelCase)[0]
_lowercase : Tuple = np.abs(output - output_loaded).max()
self.assertLess(lowerCamelCase, 1E-4)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : int = 'cpu'
_lowercase : Optional[int] = self.get_dummy_components()
_lowercase : List[str] = self.pipeline_class(**lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Tuple = self.get_dummy_mask_inputs(lowerCamelCase)
_lowercase : List[Any] = pipe.generate_mask(**lowerCamelCase)
_lowercase : int = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16))
_lowercase : Tuple = np.array([0] * 9)
_lowercase : Optional[Any] = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(lowerCamelCase, 1E-3)
self.assertEqual(mask[0, -3, -4], 0)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Tuple = 'cpu'
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : int = self.pipeline_class(**lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[str] = self.get_dummy_inversion_inputs(lowerCamelCase)
_lowercase : List[Any] = pipe.invert(**lowerCamelCase).images
_lowercase : List[str] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3))
_lowercase : Union[str, Any] = np.array(
            [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9], )
_lowercase : List[Any] = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(lowerCamelCase, 1E-3)
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = 'cpu'
_lowercase : Dict = self.get_dummy_components()
_lowercase : List[str] = {'beta_start': 0.0_0_0_8_5, 'beta_end': 0.0_1_2, 'beta_schedule': 'scaled_linear'}
_lowercase : Tuple = DPMSolverMultistepScheduler(**lowerCamelCase)
_lowercase : List[Any] = DPMSolverMultistepInverseScheduler(**lowerCamelCase)
_lowercase : Any = self.pipeline_class(**lowerCamelCase)
pipe.to(lowerCamelCase)
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : List[Any] = self.get_dummy_inversion_inputs(lowerCamelCase)
_lowercase : Any = pipe.invert(**lowerCamelCase).images
_lowercase : List[str] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3))
_lowercase : str = np.array(
            [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9], )
_lowercase : str = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(lowerCamelCase, 1E-3)
@require_torch_gpu
@slow
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase ( cls) -> Dict:
"""simple docstring"""
_lowercase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png')
_lowercase : str = raw_image.convert('RGB').resize((7_68, 7_68))
_lowercase : Union[str, Any] = raw_image
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[int] = torch.manual_seed(0)
_lowercase : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=lowerCamelCase, torch_dtype=torch.floataa)
_lowercase : Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config)
_lowercase : Optional[Any] = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCamelCase)
_lowercase : Any = 'a bowl of fruit'
_lowercase : Any = 'a bowl of pears'
_lowercase : Optional[int] = pipe.generate_mask(
image=self.raw_image, source_prompt=lowerCamelCase, target_prompt=lowerCamelCase, generator=lowerCamelCase, )
_lowercase : Dict = pipe.invert(
prompt=lowerCamelCase, image=self.raw_image, inpaint_strength=0.7, generator=lowerCamelCase).latents
_lowercase : Dict = pipe(
prompt=lowerCamelCase, mask_image=lowerCamelCase, image_latents=lowerCamelCase, generator=lowerCamelCase, negative_prompt=lowerCamelCase, inpaint_strength=0.7, output_type='numpy', ).images[0]
_lowercase : Optional[int] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png').resize((7_68, 7_68)))
/ 2_55
)
assert np.abs((expected_image - image).max()) < 5E-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
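
# A minimal usage sketch of the same workflow outside the test harness
# (illustrative only; `img` is any 768x768 RGB PIL image):
#
#   pipe = StableDiffusionDiffEditPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
#   ).to("cuda")
#   mask = pipe.generate_mask(
#       image=img, source_prompt="a bowl of fruit", target_prompt="a bowl of pears"
#   )
#   latents = pipe.invert(prompt="a bowl of fruit", image=img).latents
#   edited = pipe(
#       prompt="a bowl of pears", mask_image=mask, image_latents=latents
#   ).images[0]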
| 89 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # Write a toy WordPiece vocabulary for the Bert tokenizer.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # Write a matching image-processor config next to the vocab file.
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Random channels-first uint8 images, converted to channels-last PIL images.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
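
    # AlignProcessor bundles a Bert tokenizer with an EfficientNet image
    # processor; the tests below check that the pair round-trips through
    # save_pretrained()/from_pretrained() and that the processor delegates
    # text and image inputs to the right component.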

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # The processor should raise when called with neither text nor images.
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
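
# A minimal usage sketch of AlignProcessor outside the test harness
# (illustrative; "kakaobrain/align-base" is the public ALIGN checkpoint):
#
#   processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#   inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")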
| 29 | 0 |