'''Tests for RagRetriever over canonical, custom, and legacy FAISS indexes.'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch


if is_faiss_available():
    import faiss


@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]",
            "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs

        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc-token related keys in the dictionary
'''Lazy-import structure for the X-MOD model.'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
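# Usage sketch (illustrative, assuming the usual transformers import paths):
# thanks to _LazyModule, touching a config symbol stays cheap, while touching
# a modeling symbol is what first triggers the heavy torch-backed import:
#   from transformers.models.xmod import XmodConfig  # no torch modeling import yet
#   from transformers.models.xmod import XmodModel   # imports modeling_xmod lazily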
'''BART model configuration and ONNX export configuration.'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
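# Minimal sketch (assumed call pattern, mirroring the methods above): build the
# ONNX config for the default seq2seq task and ask it for dummy export inputs.
#   from transformers import BartConfig, BartTokenizer
#   onnx_config = BartOnnxConfig(BartConfig(), task="default")
#   dummy_inputs = onnx_config.generate_dummy_inputs(
#       BartTokenizer.from_pretrained("facebook/bart-large"), framework=TensorType.PYTORCH
#   )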
'''CLI utility: convert a saved PyTorch state dict to fp16.'''
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
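# Usage sketch via the fire CLI above (hypothetical file names):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path halves the checkpoint in place, overwriting src_path.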
'''Tests for the DisjunctiveConstraint used in constrained generation.'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
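# Minimal usage sketch (same public API exercised above): beam search feeds
# tokens one at a time, and `completed` flips once any one branch is matched.
#   dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   for token in (1, 2, 4):
#       stepped, completed, reset = dc.update(token)
#   assert completed  # the [1, 2, 4] branch was fully generated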
'''JIT-compile the custom CUDA kernels for Deformable DETR's multi-scale deformable attention.'''
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
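# Note: torch.utils.cpp_extension.load JIT-compiles the listed sources on the
# first call and caches the build (by default under ~/.cache/torch_extensions),
# so only the first load_cuda_kernels() call pays the compilation cost.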
'''Project Euler 100: arranged probability.'''


def solution(min_total: int = 10**12) -> int:
    """
    Returns the number of blue discs for the first arrangement to contain
    over min_total discs in total.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
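# Worked small case (arithmetic check, not part of the original script): with
# 15 blue and 6 red discs, P(two blue) = (15/21) * (14/20) = 210/420 = 1/2, so
# 21 total discs is an exact arrangement. The recurrence above steps through
# the family of exact arrangements (3 of 4, 15 of 21, 85 of 120, ...) and stops
# once the total disc count exceeds min_total.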
'''Greedy activity selection: print a maximum set of non-overlapping activities.'''
from __future__ import annotations


def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Prints a maximum-size set of mutually compatible activities, assuming the
    activities are already sorted by finish time.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider the rest of the activities
    for j in range(n):
        # If this activity has a start time greater than or equal to the
        # finish time of the previously selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
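# Trace of the demo above (inputs are sorted by finish time, which the greedy
# step requires): 0 is taken first, then 1 (3 >= 2), then 3 (5 >= 4), then
# 4 (8 >= 7), so the printed selection is "0,1,3,4,".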
'''Deprecated SageMakerTrainer shim kept for backward compatibility.'''
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
'''Fine-tune FlaxBigBird on Natural Questions with a custom QA + category head.'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the category.

    This way we can load its weights with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
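# Wiring sketch (assumed glue code, not part of this file): the pieces above
# are combined roughly like this before calling trainer.train(...); `collator`
# and `num_train_steps` are placeholders for values computed elsewhere.
#   tx, lr = build_tx(lr=args.lr, init_lr=args.init_lr, warmup_steps=args.warmup_steps,
#                     num_train_steps=num_train_steps, weight_decay=args.weight_decay)
#   trainer = Trainer(args=args, data_collator=collator, train_step_fn=train_step,
#                     val_step_fn=val_step, model_save_fn=model.save_pretrained,
#                     logger=wandb, scheduler_fn=lr)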
'''Registry of packaged dataset builder modules and their extension mappings.'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULES_SUPPORTING_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
'''Project Euler 207: integer partition equations.'''
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
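# Worked check for the helper (arithmetic only): check_partition_perfect(12)
# is True because sqrt(4 * 12 + 1) = 7 and (7 + 1) / 2 = 4 = 2**2, an exact
# power of two, so log2 returns the integer value 2.0.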
'''Strand sort (https://en.wikipedia.org/wiki/Strand_sort).'''
from __future__ import annotations

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """
    Strand sort: repeatedly extract an increasing "strand" from the input
    and merge it into the solution list.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
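    # Caveat (behavior of the implementation above): strand_sort pops elements
    # from its input list, so pass a copy when the original must survive:
    data = [4, 3, 5, 1, 2]
    assert strand_sort(list(data)) == [1, 2, 3, 4, 5]
    assert data == [4, 3, 5, 1, 2]  # original untouched thanks to the copy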
'''Open knight's tour via backtracking.'''
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
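    # Quick sanity check of the solver above (a known result, for illustration):
    # a 1x1 board is an immediate tour, while a 4x4 board raises ValueError
    # since no open knight's tour exists there.
    assert open_knight_tour(1) == [[1]]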
'''XGBoost regression demo on the California housing dataset.'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load the California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'vision-encoder-decoder'
_UpperCamelCase = True
def __init__( self ,**_lowerCAmelCase ):
super().__init__(**_lowerCAmelCase )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'''A configuraton of type {self.model_type} cannot be instantiated because '''
F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
lowerCamelCase__ = kwargs.pop("""encoder""" )
lowerCamelCase__ = encoder_config.pop("""model_type""" )
lowerCamelCase__ = kwargs.pop("""decoder""" )
lowerCamelCase__ = decoder_config.pop("""model_type""" )
lowerCamelCase__ = AutoConfig.for_model(_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = AutoConfig.for_model(_lowerCAmelCase ,**_lowerCAmelCase )
lowerCamelCase__ = True
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ):
logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
lowerCamelCase__ = True
lowerCamelCase__ = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
lowerCamelCase__ = self.encoder.to_dict()
lowerCamelCase__ = self.decoder.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = version.parse('1.11' )
@property
def UpperCamelCase_ ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCamelCase_ ( self ):
return 1E-4
@property
def UpperCamelCase_ ( self ):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class UpperCamelCase__ (a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = OrderedDict()
lowerCamelCase__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
lowerCamelCase__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
lowerCamelCase__ = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,):
import torch
lowerCamelCase__ = OrderedDict()
lowerCamelCase__ = super().generate_dummy_inputs(
_lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = dummy_input["""input_ids"""].shape
lowerCamelCase__ = (batch, encoder_sequence, self._config.encoder_hidden_size)
lowerCamelCase__ = dummy_input.pop("""input_ids""" )
lowerCamelCase__ = dummy_input.pop("""attention_mask""" )
lowerCamelCase__ = torch.zeros(_lowerCAmelCase )
return common_inputs
class UpperCamelCase__ (a ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
pass
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = "default" ):
lowerCamelCase__ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCAmelCase ,_lowerCAmelCase )
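# Hedged usage sketch: the configuration class above mirrors Transformers'
# VisionEncoderDecoderConfig; with that library installed, composing the two
# sub-configurations works like this.
#
#   from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig
#   composed = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
#   assert composed.decoder.is_decoder and composed.decoder.add_cross_attention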
| 50 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCamelCase__ = 4
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# with processor list
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
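# Hedged, self-contained illustration of the top-k filtering exercised above:
# every logit below the k-th largest is pushed to -inf, so it vanishes under softmax.
_logits = np.array([0.1, 2.0, -1.0, 3.0])
_cutoff = np.sort(_logits)[-2]  # k = 2: smallest surviving logit
_filtered = np.where(_logits >= _cutoff, _logits, -np.inf)
assert np.isinf(_filtered).sum() == 2  # exactly two logits were removed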
| 50 | 1 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
UpperCamelCase : Optional[Any] = '<<<<<<< This should probably be modified because it mentions: '
UpperCamelCase : Union[str, Any] = '=======\n>>>>>>>\n'
UpperCamelCase : Optional[int] = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
UpperCamelCase : Optional[Any] = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def A__ ( __lowerCAmelCase : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCamelCase__ (a ):
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( _lowerCAmelCase ):
lowerCamelCase__ = parser.add_parser(
"""convert""" ,help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" ,)
train_parser.add_argument(
"""--tfds_path""" ,type=_lowerCAmelCase ,required=_lowerCAmelCase ,help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" ,)
train_parser.add_argument(
"""--datasets_directory""" ,type=_lowerCAmelCase ,required=_lowerCAmelCase ,help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=_lowerCAmelCase )
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ,*_lowerCAmelCase ):
lowerCamelCase__ = get_logger("""datasets-cli/converting""" )
lowerCamelCase__ = tfds_path
lowerCamelCase__ = datasets_directory
def UpperCamelCase_ ( self ):
if os.path.isdir(self._tfds_path ):
lowerCamelCase__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowerCamelCase__ = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
lowerCamelCase__ = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = {}
if os.path.isdir(self._tfds_path ):
lowerCamelCase__ = os.listdir(_lowerCAmelCase )
else:
lowerCamelCase__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
lowerCamelCase__ = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
if not os.path.isfile(_lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(_lowerCAmelCase ,encoding="""utf-8""" ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = []
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = []
for line in lines:
lowerCamelCase__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowerCamelCase__ = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
lowerCamelCase__ = """"""
continue
elif "from absl import logging" in out_line:
lowerCamelCase__ = """from datasets import logging\n"""
elif "getLogger" in out_line:
lowerCamelCase__ = out_line.replace("""getLogger""" ,"""get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowerCamelCase__ = True
                    lowerCamelCase__ = list(filter(lambda e : e in out_line ,TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowerCAmelCase ) + """\n""" )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
continue
else:
for pattern, replacement in TO_CONVERT:
lowerCamelCase__ = re.sub(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowerCamelCase__ = re.match(R"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" ,_lowerCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
lowerCamelCase__ = """from . import """ + match.group(1 )
                    # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowerCamelCase__ = True
out_lines.append(_lowerCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowerCamelCase__ = f_name.replace(""".py""" ,"""""" )
lowerCamelCase__ = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
os.makedirs(_lowerCAmelCase ,exist_ok=_lowerCAmelCase )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowerCAmelCase )
if needs_manual_update:
with_manual_update.append(_lowerCAmelCase )
with open(_lowerCAmelCase ,"""w""" ,encoding="""utf-8""" ) as f:
f.writelines(_lowerCAmelCase )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
lowerCamelCase__ = os.path.basename(_lowerCAmelCase )
lowerCamelCase__ = imports_to_builder_map[f_name.replace(""".py""" ,"""""" )]
                self._logger.info(F'''Moving {utils_file} to {dest_folder}''' )
shutil.copy(_lowerCAmelCase ,_lowerCAmelCase )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
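# Hedged, self-contained illustration of the substitution pass above: each
# (pattern, replacement) pair in TO_CONVERT is applied with re.sub.
_line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
_line = re.sub(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features(", _line)
_line = re.sub(r"tfds\.features\.Text\(\)", r"datasets.Value('string')", _line)
assert _line == "features=datasets.Features({'text': datasets.Value('string')})"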
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase : Any = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class UpperCamelCase__ :
'''simple docstring'''
_UpperCamelCase = 42
# setable values
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = None
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
return cls(common=_lowerCAmelCase ,init_noise_sigma=_lowerCAmelCase ,timesteps=_lowerCAmelCase )
@dataclass
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 42
class UpperCamelCase__ (a ,a ):
'''simple docstring'''
_UpperCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
_UpperCamelCase = 42
@property
def UpperCamelCase_ ( self ):
return True
@register_to_config
def __init__( self ,_lowerCAmelCase = 10_00 ,_lowerCAmelCase = 0.0001 ,_lowerCAmelCase = 0.02 ,_lowerCAmelCase = "linear" ,_lowerCAmelCase = None ,_lowerCAmelCase = "fixed_small" ,_lowerCAmelCase = True ,_lowerCAmelCase = "epsilon" ,_lowerCAmelCase = jnp.floataa ,):
lowerCamelCase__ = dtype
def UpperCamelCase_ ( self ,_lowerCAmelCase = None ):
if common is None:
lowerCamelCase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCamelCase__ = jnp.array(1.0 ,dtype=self.dtype )
lowerCamelCase__ = jnp.arange(0 ,self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowerCAmelCase ,init_noise_sigma=_lowerCAmelCase ,timesteps=_lowerCAmelCase ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ):
return sample
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = () ):
lowerCamelCase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCamelCase__ = (jnp.arange(0 ,_lowerCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowerCAmelCase ,timesteps=_lowerCAmelCase ,)
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ):
lowerCamelCase__ = state.common.alphas_cumprod[t]
lowerCamelCase__ = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
        # For t > 0, compute the predicted variance β̃t (see formulas (6) and (7) of https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCamelCase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase__ = jnp.clip(_lowerCAmelCase ,a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase__ = jnp.log(jnp.clip(_lowerCAmelCase ,a_min=1E-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase__ = variance
lowerCamelCase__ = state.common.betas[t]
lowerCamelCase__ = (predicted_variance + 1) / 2
lowerCamelCase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,):
lowerCamelCase__ = timestep
if key is None:
lowerCamelCase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCamelCase__ , lowerCamelCase__ = jnp.split(_lowerCAmelCase ,sample.shape[1] ,axis=1 )
else:
lowerCamelCase__ = None
# 1. compute alphas, betas
lowerCamelCase__ = state.common.alphas_cumprod[t]
lowerCamelCase__ = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
lowerCamelCase__ = 1 - alpha_prod_t
lowerCamelCase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCamelCase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCamelCase__ = jnp.clip(_lowerCAmelCase ,-1 ,1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCamelCase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCamelCase__ = jax.random.split(_lowerCAmelCase ,num=1 )
lowerCamelCase__ = jax.random.normal(_lowerCAmelCase ,shape=model_output.shape ,dtype=self.dtype )
return (self._get_variance(_lowerCAmelCase ,_lowerCAmelCase ,predicted_variance=_lowerCAmelCase ) ** 0.5) * noise
lowerCamelCase__ = jnp.where(t > 0 ,random_variance() ,jnp.zeros(model_output.shape ,dtype=self.dtype ) )
lowerCamelCase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowerCAmelCase ,state=_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
return add_noise_common(state.common ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,):
return get_velocity_common(state.common ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
def __len__( self ):
return self.config.num_train_timesteps
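# Hedged numeric check of the posterior variance used in the variance branch above
# (formula (7) of https://arxiv.org/pdf/2006.11239.pdf):
#   beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
_betas = jnp.linspace(0.0001, 0.02, 1000)
_alphas_cumprod = jnp.cumprod(1.0 - _betas)
_t = 10
_variance = (1 - _alphas_cumprod[_t - 1]) / (1 - _alphas_cumprod[_t]) * _betas[_t]
assert 0.0 < float(_variance) < float(_betas[_t])  # the posterior variance never exceeds beta_t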
| 50 |
'''simple docstring'''
def A__ ( num_a : int , num_b : int ):
    # True exactly when the two integers have opposite signs (their sign bits differ).
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
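# Hedged illustration: in two's complement the sign lives in the top bit, so the
# XOR of two integers is negative exactly when their signs differ.
assert (5 ^ -3) < 0 and (-5 ^ -3) >= 0 and (5 ^ 3) >= 0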
| 50 | 1 |
'''simple docstring'''
from __future__ import annotations
UpperCamelCase : List[Any] = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = graph
# mapping node to its parent in resulting breadth first tree
lowerCamelCase__ = {}
lowerCamelCase__ = source_vertex
def UpperCamelCase_ ( self ):
lowerCamelCase__ = {self.source_vertex}
lowerCamelCase__ = None
lowerCamelCase__ = [self.source_vertex] # first in first out queue
while queue:
lowerCamelCase__ = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_lowerCAmelCase )
lowerCamelCase__ = vertex
queue.append(_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ):
if target_vertex == self.source_vertex:
return self.source_vertex
lowerCamelCase__ = self.parent.get(_lowerCAmelCase )
if target_vertex_parent is None:
lowerCamelCase__ = (
F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(_lowerCAmelCase )
return self.shortest_path(_lowerCAmelCase ) + F'''->{target_vertex}'''
if __name__ == "__main__":
UpperCamelCase : List[Any] = Graph(graph, 'G')
    g.breadth_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
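# Hedged note on the demo above for source vertex 'G': the parent map the search
# builds yields
#   G->C->A->B->D
#   G
# and the third call raises ValueError: No path from vertex: G to vertex: Foo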
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Union[str, Any] = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 1 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : int = 100_0000 ):
lowerCamelCase__ = limit + 1
lowerCamelCase__ = [0] * limit
for first_term in range(1 , __lowerCAmelCase ):
for n in range(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = first_term + n / first_term
            if common_difference % 4: # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
                ): # since x, y, z are positive integers
                    frequency[n] += 1 # a > d gives z > 0, and a < 4d gives n > 0
lowerCamelCase__ = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'{solution() = }')
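# Hedged worked check of the identity behind the sieve above: with x, y, z in
# arithmetic progression (x = y + d, z = y - d),
#   x**2 - y**2 - z**2 = (y + d)**2 - y**2 - (y - d)**2 = y * (4*d - y)
# e.g. y = 9, d = 3: 12**2 - 9**2 - 6**2 == 9 * (4*3 - 9) == 27
assert 12**2 - 9**2 - 6**2 == 9 * (4 * 3 - 9) == 27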
| 50 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # suppress TensorFlow's C++ startup logging
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 50 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Dict = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'sew'
def __init__( self ,_lowerCAmelCase=32 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase=2 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase="group" ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) ,_lowerCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,_lowerCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,_lowerCAmelCase=False ,_lowerCAmelCase=1_28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=True ,_lowerCAmelCase=0.05 ,_lowerCAmelCase=10 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=10 ,_lowerCAmelCase=0 ,_lowerCAmelCase="mean" ,_lowerCAmelCase=False ,_lowerCAmelCase=False ,_lowerCAmelCase=2_56 ,_lowerCAmelCase=0 ,_lowerCAmelCase=1 ,_lowerCAmelCase=2 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase ,pad_token_id=_lowerCAmelCase ,bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = feat_extract_norm
lowerCamelCase__ = feat_extract_activation
lowerCamelCase__ = list(_lowerCAmelCase )
lowerCamelCase__ = list(_lowerCAmelCase )
lowerCamelCase__ = list(_lowerCAmelCase )
lowerCamelCase__ = conv_bias
lowerCamelCase__ = num_conv_pos_embeddings
lowerCamelCase__ = num_conv_pos_embedding_groups
lowerCamelCase__ = len(self.conv_dim )
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = squeeze_factor
lowerCamelCase__ = hidden_act
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_dropout
lowerCamelCase__ = attention_dropout
lowerCamelCase__ = activation_dropout
lowerCamelCase__ = feat_proj_dropout
lowerCamelCase__ = final_dropout
lowerCamelCase__ = layerdrop
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = initializer_range
lowerCamelCase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__ = apply_spec_augment
lowerCamelCase__ = mask_time_prob
lowerCamelCase__ = mask_time_length
lowerCamelCase__ = mask_time_min_masks
lowerCamelCase__ = mask_feature_prob
lowerCamelCase__ = mask_feature_length
lowerCamelCase__ = mask_feature_min_masks
# ctc loss
lowerCamelCase__ = ctc_loss_reduction
lowerCamelCase__ = ctc_zero_infinity
# sequence classification
lowerCamelCase__ = use_weighted_layer_sum
lowerCamelCase__ = classifier_proj_size
@property
def UpperCamelCase_ ( self ):
return functools.reduce(operator.mul ,self.conv_stride ,1 )
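# Hedged check of the property above: the model's overall input-to-output
# downsampling is the product of the convolutional strides; for the default
# stride tuple that factor is 320 (the ones contribute nothing).
assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320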
| 50 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'gpt_bigcode'
_UpperCamelCase = ['past_key_values']
_UpperCamelCase = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self ,_lowerCAmelCase=5_02_57 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_pytorch_tanh" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_embd
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = scale_attn_weights
lowerCamelCase__ = use_cache
lowerCamelCase__ = attention_softmax_in_fpaa
lowerCamelCase__ = scale_attention_softmax_in_fpaa
lowerCamelCase__ = multi_query
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,**_lowerCAmelCase )
| 50 | 1 |
'''simple docstring'''
import heapq
def A__ ( __lowerCAmelCase : dict ):
lowerCamelCase__ = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(__lowerCAmelCase , [-1 * len(__lowerCAmelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowerCamelCase__ = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowerCamelCase__ = heapq.heappop(__lowerCAmelCase )[1][0]
chosen_vertices.add(__lowerCAmelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if v has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowerCamelCase__ = elem[1][1].index(__lowerCAmelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__lowerCAmelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
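# Hedged note: for the sample graph above the greedy pass returns {0, 1, 2, 4}
# (vertices 2 and 3 tie at rank 3; the heap breaks the tie toward vertex 2).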
| 50 |
'''simple docstring'''
from PIL import Image
def A__ ( __lowerCAmelCase : Image , __lowerCAmelCase : float ):
    def brightness(c : int ) -> float:
        return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(__lowerCAmelCase )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
UpperCamelCase : Union[str, Any] = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
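# Hedged numeric aside: brightness(c) = 128 + level + (c - 128) is simply
# c + level, so level = 100 maps pixel value 128 to 228.
assert 128 + 100 + (128 - 128) == 228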
| 50 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = ['image_processor', 'tokenizer']
_UpperCamelCase = 'LayoutLMv3ImageProcessor'
_UpperCamelCase = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,**_lowerCAmelCase ):
lowerCamelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_lowerCAmelCase ,)
lowerCamelCase__ = kwargs.pop("""feature_extractor""" )
lowerCamelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_lowerCAmelCase ,_lowerCAmelCase )
def __call__( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = 0 ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = False ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
lowerCamelCase__ = self.image_processor(images=_lowerCAmelCase ,return_tensors=_lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase__ = features["""words"""]
lowerCamelCase__ = self.tokenizer(
text=text if text is not None else features["""words"""] ,text_pair=text_pair if text_pair is not None else None ,boxes=boxes if boxes is not None else features["""boxes"""] ,word_labels=_lowerCAmelCase ,add_special_tokens=_lowerCAmelCase ,padding=_lowerCAmelCase ,truncation=_lowerCAmelCase ,max_length=_lowerCAmelCase ,stride=_lowerCAmelCase ,pad_to_multiple_of=_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase ,return_attention_mask=_lowerCAmelCase ,return_overflowing_tokens=_lowerCAmelCase ,return_special_tokens_mask=_lowerCAmelCase ,return_offsets_mapping=_lowerCAmelCase ,return_length=_lowerCAmelCase ,verbose=_lowerCAmelCase ,return_tensors=_lowerCAmelCase ,**_lowerCAmelCase ,)
# add pixel values
lowerCamelCase__ = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
lowerCamelCase__ = self.get_overflowing_images(_lowerCAmelCase ,encoded_inputs["""overflow_to_sample_mapping"""] )
lowerCamelCase__ = images
return encoded_inputs
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowerCamelCase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F''' {len(_lowerCAmelCase )} and {len(_lowerCAmelCase )}''' )
return images_with_overflow
def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,*_lowerCAmelCase ,**_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase ,**_lowerCAmelCase )
@property
def UpperCamelCase_ ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def UpperCamelCase_ ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_lowerCAmelCase ,)
return self.image_processor_class
@property
def UpperCamelCase_ ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_lowerCAmelCase ,)
return self.image_processor
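# Hedged usage sketch (mirrors the real LayoutLMv3Processor API this class
# corresponds to; the checkpoint name is illustrative):
#
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(image, return_tensors="pt")  # OCR words/boxes handled internally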
| 50 |
'''simple docstring'''
def A__ ( ):
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
UpperCamelCase : Dict = generate_large_matrix()
UpperCamelCase : Any = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def A__ ( __lowerCAmelCase : list[list[int]] ):
assert all(row == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for row in grid )
assert all(list(__lowerCAmelCase ) == sorted(__lowerCAmelCase , reverse=__lowerCAmelCase ) for col in zip(*__lowerCAmelCase ) )
def A__ ( __lowerCAmelCase : list[int] ):
lowerCamelCase__ = 0
lowerCamelCase__ = len(__lowerCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowerCamelCase__ = (left + right) // 2
lowerCamelCase__ = array[mid]
        # num must be negative and the element just before it must be non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowerCamelCase__ = mid + 1
else:
lowerCamelCase__ = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : list[list[int]] ):
lowerCamelCase__ = 0
lowerCamelCase__ = len(grid[0] )
for i in range(len(__lowerCAmelCase ) ):
lowerCamelCase__ = find_negative_index(grid[i][:bound] )
total += bound
return (len(__lowerCAmelCase ) * len(grid[0] )) - total
def A__ ( __lowerCAmelCase : list[list[int]] ):
return len([number for row in grid for number in row if number < 0] )
def A__ ( __lowerCAmelCase : list[list[int]] ):
lowerCamelCase__ = 0
for row in grid:
for i, number in enumerate(__lowerCAmelCase ):
if number < 0:
total += len(__lowerCAmelCase ) - i
break
return total
def A__ ( ):
from timeit import timeit
print("""Running benchmarks""" )
lowerCamelCase__ = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowerCamelCase__ = timeit(F'''{func}(grid=grid)''' , setup=__lowerCAmelCase , number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
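# Hedged worked check of find_negative_index on a single non-increasing row:
# in [4, 3, -1, -2] the first negative sits at index 2, so the row contributes
# len(row) - 2 == 2 negatives.
_row = [4, 3, -1, -2]
_first_neg = next(i for i, v in enumerate(_row) if v < 0)
assert _first_neg == 2 and len(_row) - _first_neg == 2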
| 50 | 1 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
# Initialise PyTorch model
lowerCamelCase__ = BigBirdConfig.from_json_file(__lowerCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
if is_trivia_qa:
lowerCamelCase__ = BigBirdForQuestionAnswering(__lowerCAmelCase )
else:
lowerCamelCase__ = BigBirdForPreTraining(__lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__lowerCAmelCase , __lowerCAmelCase , is_trivia_qa=__lowerCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
UpperCamelCase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
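# Hedged CLI sketch (script and paths are illustrative placeholders):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird_ckpt \
#       --big_bird_config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model \
#       --is_trivia_qa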
| 50 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCamelCase : List[Any] = 'examples/'
UpperCamelCase : int = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCamelCase : Any = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
UpperCamelCase : Any = 'README.md'
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ):
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.read()
lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern]
lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase )
lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : str ):
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" )
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = """🤗 Transformers currently provides the following architectures"""
lowerCamelCase__ = """1. Want to contribute a new model?"""
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.readlines()
# Find the start of the list.
lowerCamelCase__ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase__ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
lowerCamelCase__ = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__lowerCAmelCase )
def A__ ( ):
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
lowerCamelCase__ = f.read()
lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : Union[str, Any]=False ):
lowerCamelCase__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
lowerCamelCase__ = default_version.base_version
elif patch:
lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__lowerCAmelCase ) == 0:
lowerCamelCase__ = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def A__ ( ):
lowerCamelCase__ = get_version()
lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowerCamelCase__ = current_version.base_version
# Check with the user we got that right.
lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__lowerCAmelCase ) == 0:
lowerCamelCase__ = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCamelCase : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
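# Hedged, self-contained illustration of the 'init' pattern above: the MULTILINE
# anchor plus the trailing \s* swallows the whole version line, newline included,
# so the replacement template must supply its own newline.
_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
_text = '__version__ = "4.30.0.dev0"\n'
assert _pattern.sub('__version__ = "4.30.0"\n', _text) == '__version__ = "4.30.0"\n'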
| 50 | 1 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase__ :
'''simple docstring'''
_UpperCamelCase = None
@experimental
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ):
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return _map_with_joblib(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ):
lowerCamelCase__ = num_proc if num_proc <= len(__lowerCAmelCase ) else len(__lowerCAmelCase )
    lowerCamelCase__ = [] # We organize the splits ourselves (contiguous splits)
for index in range(__lowerCAmelCase ):
lowerCamelCase__ = len(__lowerCAmelCase ) // num_proc
lowerCamelCase__ = len(__lowerCAmelCase ) % num_proc
lowerCamelCase__ = div * index + min(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
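        # Worked example of the contiguous split above (assumed sizes): 10 items over
        # 3 processes gives div=3, mod=1 and slices [0:4], [4:7], [7:10], i.e. sizes 4, 3, 3.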
if len(__lowerCAmelCase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F'''Error dividing inputs iterable among processes. '''
F'''Total number of objects {len(__lowerCAmelCase )}, '''
F'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
logger.info(
F'''Spawning {num_proc} processes for {len(__lowerCAmelCase )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
lowerCamelCase__ , lowerCamelCase__ = None, None
if not disable_tqdm:
lowerCamelCase__ , lowerCamelCase__ = (RLock(),), tqdm.set_lock
with Pool(__lowerCAmelCase , initargs=__lowerCAmelCase , initializer=__lowerCAmelCase ) as pool:
lowerCamelCase__ = pool.map(__lowerCAmelCase , __lowerCAmelCase )
logger.info(F'''Finished {num_proc} processes''' )
lowerCamelCase__ = [obj for proc_res in mapped for obj in proc_res]
logger.info(F'''Unpacked {len(__lowerCAmelCase )} objects''' )
return mapped
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] ):
    # A progress bar is not yet supported for _map_with_joblib: tqdm cannot be applied accurately to joblib,
    # and doing so would require monkey-patching joblib's internal classes, which are subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__lowerCAmelCase ):
return joblib.Parallel()(
joblib.delayed(__lowerCAmelCase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def A__ ( __lowerCAmelCase : str ):
lowerCamelCase__ = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowerCamelCase__ = None
| 50 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase : int = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase : Tuple = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
UpperCamelCase : Dict = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = SqueezeBertTokenizer
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(
_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )
lowerCamelCase__ = do_lower_case
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
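# Minimal usage sketch for the fast tokenizer above (assumes Hub access; not part
# of the original file):
#   from transformers import SqueezeBertTokenizerFast
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   tok("hello world")  # input_ids come back wrapped in [CLS] ... [SEP]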
| 50 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase : Union[str, Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase : str = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase : List[str] = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
UpperCamelCase : List[str] = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = ElectraTokenizer
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(
_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )
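            # The rebuild above keeps the backend normalizer in sync with the requested
            # lowercasing / accent-stripping options, even when the serialized
            # tokenizer.json was saved with different settings.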
lowerCamelCase__ = do_lower_case
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 50 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A__ ( __lowerCAmelCase : Any ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
def A__ ( __lowerCAmelCase : str ):
    # a word like '180' or '身高' or '神'
for char in word:
lowerCamelCase__ = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def A__ ( __lowerCAmelCase : List[str] ):
lowerCamelCase__ = set()
for token in tokens:
lowerCamelCase__ = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
lowerCamelCase__ = list(__lowerCAmelCase )
return word_list
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set ):
if not chinese_word_set:
return bert_tokens
lowerCamelCase__ = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
lowerCamelCase__ = bert_tokens
lowerCamelCase__ , lowerCamelCase__ = 0, len(__lowerCAmelCase )
while start < end:
lowerCamelCase__ = True
if is_chinese(bert_word[start] ):
lowerCamelCase__ = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
lowerCamelCase__ = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCamelCase__ = """##""" + bert_word[j]
lowerCamelCase__ = start + i
lowerCamelCase__ = False
break
if single_word:
start += 1
return bert_word
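# Assumed illustration of the matching loop above: with chinese_word_set = {"身高"},
# the BERT tokens ["身", "高", "180"] become ["身", "##高", "180"], so the whole
# word "身高" can later be masked together during whole-word masking.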
def A__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ):
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
lowerCamelCase__ = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
lowerCamelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
lowerCamelCase__ = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
lowerCamelCase__ = []
for id in input_ids:
lowerCamelCase__ = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
lowerCamelCase__ = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
lowerCamelCase__ = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def A__ ( __lowerCAmelCase : Optional[int] ):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    lowerCamelCase__ = LTP(args.ltp ) # faster on a GPU device
lowerCamelCase__ = BertTokenizer.from_pretrained(args.bert )
lowerCamelCase__ = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
lowerCamelCase__ = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
        help='path to save results',
)
UpperCamelCase : Any = parser.parse_args()
main(args)
| 50 | 1 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : str = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'encodec'
def __init__( self ,_lowerCAmelCase=[1.5, 3.0, 6.0, 12.0, 24.0] ,_lowerCAmelCase=2_40_00 ,_lowerCAmelCase=1 ,_lowerCAmelCase=False ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=1_28 ,_lowerCAmelCase=32 ,_lowerCAmelCase=1 ,_lowerCAmelCase=[8, 5, 4, 2] ,_lowerCAmelCase="weight_norm" ,_lowerCAmelCase=7 ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=2 ,_lowerCAmelCase=True ,_lowerCAmelCase="reflect" ,_lowerCAmelCase=2 ,_lowerCAmelCase=2 ,_lowerCAmelCase=1.0 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
lowerCamelCase__ = target_bandwidths
lowerCamelCase__ = sampling_rate
lowerCamelCase__ = audio_channels
lowerCamelCase__ = normalize
lowerCamelCase__ = chunk_length_s
lowerCamelCase__ = overlap
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_filters
lowerCamelCase__ = num_residual_layers
lowerCamelCase__ = upsampling_ratios
lowerCamelCase__ = norm_type
lowerCamelCase__ = kernel_size
lowerCamelCase__ = last_kernel_size
lowerCamelCase__ = residual_kernel_size
lowerCamelCase__ = dilation_growth_rate
lowerCamelCase__ = use_causal_conv
lowerCamelCase__ = pad_mode
lowerCamelCase__ = compress
lowerCamelCase__ = num_lstm_layers
lowerCamelCase__ = trim_right_ratio
lowerCamelCase__ = codebook_size
lowerCamelCase__ = codebook_dim if codebook_dim is not None else hidden_size
lowerCamelCase__ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F'''self.norm_type must be one of `"weight_norm"` or `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**_lowerCAmelCase )
@property
def UpperCamelCase_ ( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase_ ( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCamelCase_ ( self ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
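# Example of the derived properties above with the default 24 kHz settings:
# chunk_length_s is None, so chunk_length and chunk_stride are None; the hop
# length is prod([8, 5, 4, 2]) = 320, giving frame_rate = ceil(24000 / 320) = 75.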
| 50 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCamelCase__ = 1024
lowerCamelCase__ = 4096
lowerCamelCase__ = 24
lowerCamelCase__ = 16
lowerCamelCase__ = [5, 11, 17, 23]
lowerCamelCase__ = [256, 512, 1024, 1024]
lowerCamelCase__ = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = [256, 512, 768, 768]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = (1, 384, 384)
lowerCamelCase__ = False
lowerCamelCase__ = """project"""
if "ade" in checkpoint_url:
lowerCamelCase__ = True
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = """huggingface/label-files"""
lowerCamelCase__ = """ade20k-id2label.json"""
lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = [1, 150, 480, 480]
return config, expected_shape
def A__ ( __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : List[Any] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCamelCase__ = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCamelCase__ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCamelCase__ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
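        # e.g. refinenet4 -> fusion_stage.layers.0 and refinenet1 -> fusion_stage.layers.3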
if "out_conv" in name:
lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ = in_proj_bias[: config.hidden_size]
lowerCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ = in_proj_bias[-config.hidden_size :]
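# Note on the slicing above: timm stores query/key/value as one fused matrix of
# shape (3 * hidden_size, hidden_size); the three slices recover the separate
# query, key and value projections, in that order.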
def A__ ( ):
lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
lowerCamelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCamelCase : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 50 | 1 |
from collections.abc import Sequence
def __lowercase ( snake_case = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
__magic_name__ :str = nums[0]
for i in range(1, len(snake_case ) ):
__magic_name__ :str = nums[i]
        __magic_name__ :Any = max(num, ans + num, ans )
return ans
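# Assumed example: for nums = [2, -5, 3] the best (not necessarily contiguous)
# subsequence is [2, 3], so the function returns 5.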
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
SCREAMING_SNAKE_CASE__ : Optional[int] = int(input("""Enter number of elements : """).strip())
SCREAMING_SNAKE_CASE__ : Any = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Tuple = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __lowerCamelCase (_a , _a ):
@register_to_config
def __init__( self: List[Any],A_: int = 768,):
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Parameter(torch.zeros(1,A_ ) )
__UpperCamelCase = nn.Parameter(torch.ones(1,A_ ) )
def snake_case_ ( self: Tuple,A_: Optional[Union[str, torch.device]] = None,A_: Optional[torch.dtype] = None,):
'''simple docstring'''
__UpperCamelCase = nn.Parameter(self.mean.to(A_ ).to(A_ ) )
__UpperCamelCase = nn.Parameter(self.std.to(A_ ).to(A_ ) )
return self
def snake_case_ ( self: Any,A_: Union[str, Any] ):
'''simple docstring'''
__UpperCamelCase = (embeds - self.mean) * 1.0 / self.std
return embeds
def snake_case_ ( self: Optional[Any],A_: Optional[int] ):
'''simple docstring'''
__UpperCamelCase = (embeds * self.std) + self.mean
return embeds
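# Note: scale followed by unscale is the identity map (up to floating point),
# since unscale applies the inverse affine transform (embeds * std) + mean.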
| 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'codegen'
_UpperCamelCase = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self ,_lowerCAmelCase=5_04_00 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=20_48 ,_lowerCAmelCase=40_96 ,_lowerCAmelCase=28 ,_lowerCAmelCase=16 ,_lowerCAmelCase=64 ,_lowerCAmelCase=None ,_lowerCAmelCase="gelu_new" ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=0.0 ,_lowerCAmelCase=1E-5 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=True ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=5_02_56 ,_lowerCAmelCase=False ,**_lowerCAmelCase ,):
lowerCamelCase__ = vocab_size
lowerCamelCase__ = n_ctx
lowerCamelCase__ = n_positions
lowerCamelCase__ = n_embd
lowerCamelCase__ = n_layer
lowerCamelCase__ = n_head
lowerCamelCase__ = n_inner
lowerCamelCase__ = rotary_dim
lowerCamelCase__ = activation_function
lowerCamelCase__ = resid_pdrop
lowerCamelCase__ = embd_pdrop
lowerCamelCase__ = attn_pdrop
lowerCamelCase__ = layer_norm_epsilon
lowerCamelCase__ = initializer_range
lowerCamelCase__ = use_cache
lowerCamelCase__ = bos_token_id
lowerCamelCase__ = eos_token_id
super().__init__(
bos_token_id=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase ,tie_word_embeddings=_lowerCAmelCase ,**_lowerCAmelCase )
class UpperCamelCase__ (a ):
'''simple docstring'''
def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase = "default" ,_lowerCAmelCase = None ,_lowerCAmelCase = False ,):
super().__init__(_lowerCAmelCase ,task=_lowerCAmelCase ,patching_specs=_lowerCAmelCase ,use_past=_lowerCAmelCase )
if not getattr(self._config ,"""pad_token_id""" ,_lowerCAmelCase ):
# TODO: how to do that better?
lowerCamelCase__ = 0
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase ,direction="""inputs""" )
lowerCamelCase__ = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase__ = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase_ ( self ):
return self._config.n_layer
@property
def UpperCamelCase_ ( self ):
return self._config.n_head
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = -1 ,_lowerCAmelCase = -1 ,_lowerCAmelCase = False ,_lowerCAmelCase = None ,):
lowerCamelCase__ = super(_lowerCAmelCase ,self ).generate_dummy_inputs(
_lowerCAmelCase ,batch_size=_lowerCAmelCase ,seq_length=_lowerCAmelCase ,is_pair=_lowerCAmelCase ,framework=_lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
lowerCamelCase__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase__ = seqlen + 2
lowerCamelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
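            # i.e. each cached key/value tensor has shape (batch, n_head, seqlen + 2, head_dim)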
lowerCamelCase__ = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers )
]
lowerCamelCase__ = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase__ = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase__ = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase ,_lowerCAmelCase ,dtype=_lowerCAmelCase )] ,dim=1 )
return ordered_inputs
@property
def UpperCamelCase_ ( self ):
return 13
| 50 | 0 |
from functools import reduce
UpperCAmelCase_ = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def SCREAMING_SNAKE_CASE_ ( _snake_case :str = UpperCAmelCase_ ) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , _snake_case[i : i + 13] ) )
for i in range(len(_snake_case ) - 12 ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 2 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : int = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
'''simple docstring'''
lowerCAmelCase : dict[str, float] = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_0217_6634e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.355818,
}
def A_( A : str , A : str , A : float):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCamelCase = (
f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
f'''Valid values are: {", ".join(A)}'''
)
raise ValueError(A)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
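# Assumed usage of the converter above (using its presumed original name,
# energy_conversion; not part of the original file):
#   energy_conversion("kilowatthour", "joule", 1)      -> 3_600_000.0
#   energy_conversion("joule", "calorie_nutr", 4186.8) -> 1.0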
| 3 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ):
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCAmelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowerCamelCase__ = v.half()
if save_path is None: # overwrite src_path
lowerCamelCase__ = src_path
torch.save(__lowerCAmelCase , __lowerCAmelCase )
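# Assumed CLI sketch (the script name and parameter names are hypothetical; the
# function is exposed via fire.Fire below):
#   python convert_to_fp16.py pytorch_model.bin --save_path model_fp16.bin
# This halves every tensor; with no save_path the source checkpoint is overwritten.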
if __name__ == "__main__":
fire.Fire(convert)
| 50 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a ( a__ , a__ , unittest.TestCase ):
snake_case__ = IFInpaintingPipeline
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_dummy_components()
def UpperCamelCase__ ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
lowerCAmelCase = torch.manual_seed(_snake_case )
else:
lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 4 |
'''simple docstring'''
import os
from pathlib import Path
def A__ ( ):
from torch.utils.cpp_extension import load
lowerCamelCase__ = Path(__lowerCAmelCase ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
lowerCamelCase__ = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , __lowerCAmelCase , with_cuda=__lowerCAmelCase , extra_include_paths=[str(__lowerCAmelCase )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 50 | 0 |
'''simple docstring'''
def A (__lowerCamelCase :list , __lowerCamelCase :int = 0 ):
_lowerCAmelCase = length or len(__lowerCamelCase )
_lowerCAmelCase = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
_lowerCAmelCase , _lowerCAmelCase = list_data[i + 1], list_data[i]
_lowerCAmelCase = True
return list_data if not swapped else bubble_sort(__lowerCamelCase , length - 1 )
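# Trace of the recursive pass above: [3, 1, 2] -> [1, 3, 2] -> [1, 2, 3] after the
# first pass, then a second pass with length - 1 finds no swaps and returns.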
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 |
'''simple docstring'''
def A__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : list[int] ):
lowerCamelCase__ = len(__lowerCAmelCase )
print("""The following activities are selected:""" )
# The first activity is always selected
lowerCamelCase__ = 0
print(__lowerCAmelCase , end=""",""" )
# Consider rest of the activities
for j in range(__lowerCAmelCase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(__lowerCAmelCase , end=""",""" )
lowerCamelCase__ = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5]
UpperCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
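    # For the sample start/finish times above, the greedy scan prints "0,1,3,4,":
    # activity 0 is always taken, then every activity whose start time is at least
    # the finish time of the previously selected one.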
| 50 | 0 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = model.config
SCREAMING_SNAKE_CASE__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
SCREAMING_SNAKE_CASE__ = MBartConfig(
is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , )
return encoder_config, decoder_config
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
if "encoder.model" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
SCREAMING_SNAKE_CASE__ = """encoder.""" + name
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias"""
return name
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
SCREAMING_SNAKE_CASE__ = key.split(""".""" )
SCREAMING_SNAKE_CASE__ = int(key_split[3] )
SCREAMING_SNAKE_CASE__ = int(key_split[5] )
SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE__ = val[:dim, :]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE__ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ = val[:dim]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # The HuggingFace implementation doesn't use the attn_mask buffer,
            # and the model doesn't use final LayerNorms for the encoder
pass
else:
SCREAMING_SNAKE_CASE__ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: str=False ):
# load original model
SCREAMING_SNAKE_CASE__ = DonutModel.from_pretrained(UpperCamelCase__ ).eval()
# load HuggingFace model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_configs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = DonutSwinModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = original_model.state_dict()
SCREAMING_SNAKE_CASE__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify results on scanned document
SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/example-documents""" )
SCREAMING_SNAKE_CASE__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
SCREAMING_SNAKE_CASE__ = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
SCREAMING_SNAKE_CASE__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
SCREAMING_SNAKE_CASE__ = """When is the coffee break?"""
SCREAMING_SNAKE_CASE__ = task_prompt.replace("""{user_input}""" , UpperCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
SCREAMING_SNAKE_CASE__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
SCREAMING_SNAKE_CASE__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
SCREAMING_SNAKE_CASE__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
SCREAMING_SNAKE_CASE__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use an arbitrary prompt
SCREAMING_SNAKE_CASE__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
SCREAMING_SNAKE_CASE__ = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[
"""input_ids"""
]
SCREAMING_SNAKE_CASE__ = original_model.encoder.model.patch_embed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.encoder.embeddings(UpperCamelCase__ )
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
# verify encoder hidden states
SCREAMING_SNAKE_CASE__ = original_model.encoder(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = model.encoder(UpperCamelCase__ ).last_hidden_state
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
# verify decoder hidden states
SCREAMING_SNAKE_CASE__ = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits
SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
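# --- Illustrative usage (added): a minimal sketch of exercising a converted
# checkpoint, assuming --pytorch_dump_folder_path was populated by this script.
# The dump path and prompt below are hypothetical examples, kept as comments.
#
# from transformers import DonutProcessor, VisionEncoderDecoderModel
# processor = DonutProcessor.from_pretrained("path/to/dump")          # hypothetical path
# model = VisionEncoderDecoderModel.from_pretrained("path/to/dump")
# pixel_values = processor(image, return_tensors="pt").pixel_values   # `image`: a PIL RGB image
# prompt_ids = processor.tokenizer("<s_rvlcdip>", add_special_tokens=False, return_tensors="pt").input_ids
# sequences = model.generate(pixel_values, decoder_input_ids=prompt_ids, max_length=128)
# print(processor.batch_decode(sequences))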
| 6 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer (Trainer ):
    '''simple docstring'''
    def __init__( self ,args=None ,**kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" ,FutureWarning ,)
        super().__init__(args=args ,**kwargs )
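# --- Illustrative check (added): a sketch showing the deprecation shim at work.
# Constructing the subclass should emit a FutureWarning before delegating to
# `Trainer.__init__`. Kept commented because a real `Trainer` needs a model and
# `TrainingArguments` (the objects below are hypothetical).
#
# import warnings as _warnings
# with _warnings.catch_warnings(record=True) as caught:
#     _warnings.simplefilter("always")
#     SageMakerTrainer(args=training_args, model=model)
#     assert any(issubclass(w.category, FutureWarning) for w in caught)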
| 50 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mluke'''] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
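# --- Illustrative note (added): what `_LazyModule` buys. Importing the package is
# cheap because nothing heavy is imported eagerly; the first *attribute access*
# triggers the real import (and only then fails if sentencepiece is missing).
# The module path below is an assumption for demonstration only.
#
# import transformers.models.mluke as mluke   # fast: no sentencepiece import yet
# tok_cls = mluke.MLukeTokenizer              # real import happens here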
| 7 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 50 | 0 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = '''src/diffusers'''
REPO_PATH = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$', line) is not None
def find_code_in_diffusers(object_name):
    parts = object_name.split('.')
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f'{module}.py')):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.')
    with open(os.path.join(DIFFUSERS_PATH, f'{module}.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index]) is None
        ):
            line_index += 1
        indent += '    '
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f' {object_name} does not match any function or class in {module}.')
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return ''.join(code_lines)
_re_copy_warning = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_re_replace_pattern = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_re_fill_pattern = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent(code):
    lines = code.split('\n')
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r'^(\s*)\S', lines[idx]).groups()[0]
    return ''
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len('class Bla:\n') :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f'^{indent}# End copy', line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n') if _re_copy_warning.search(line) is None]
        theoretical_code = '\n'.join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace('with', '').split(',')
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == 'all-casing':
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f'Detected changes, rewriting {filename}.')
        with open(filename, 'w', encoding='utf-8', newline='\n') as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, '**/*.py'), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = '\n'.join(diffs)
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
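    # --- Illustrative example (added): the comment convention the checker enforces.
    # `_re_copy_warning` captures the indent, the fully qualified source object
    # (relative to the `diffusers.` prefix), and an optional replacement pattern.
    _demo = '    # Copied from diffusers.models.attention.Attention with Attention->Cross'
    _m = _re_copy_warning.search(_demo)
    assert _m is not None and _m.groups()[1] == 'models.attention.Attention'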
| 8 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
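    # --- Illustrative checks (added): duplicates and an already-sorted input also
    # round-trip through the sublist/merge logic above.
    assert strand_sort([1, 1, 3, 2, 2]) == [1, 1, 2, 2, 3]
    assert strand_sort([1, 2, 3]) == [1, 2, 3]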
| 50 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_megatron_bert'''] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    data = fetch_california_housing()
    data_input, data_output = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        data_input, data_output, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
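    # --- Illustrative sketch (added): the same pipeline on a tiny synthetic
    # problem, without downloading the California housing data. Kept as a comment
    # so running this module is unchanged; it reuses the helpers defined above.
    #
    # rng = np.random.default_rng(0)
    # x = rng.normal(size=(100, 3))
    # y = x @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=100)
    # preds = xgboost(x[:80], y[:80], x[80:])
    # print(mean_absolute_error(y[80:], preds))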
| 50 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        return self.d_model
    def to_dict(self):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
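# --- Illustrative usage (added): instantiating with defaults materializes a ResNet
# backbone config lazily, and `to_dict` round-trips it. Kept commented because it
# needs a full transformers install at import time.
#
# config = DetaConfig()
# d = config.to_dict()
# assert d["backbone_config"]["model_type"] == "resnet"
# assert config.num_attention_heads == config.encoder_attention_heads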
| 10 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCamelCase__ = 4
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# with processor list
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
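# --- Illustrative sketch (added): how the processors under test compose inside a
# generation loop. Shapes and ids are illustrative; `input_ids`/`scores` would come
# from the model at each decoding step.
#
# processors = FlaxLogitsProcessorList(
#     [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50), FlaxTopPLogitsWarper(0.95)]
# )
# scores = processors(input_ids, scores, cur_len=input_ids.shape[-1])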
| 50 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    '''simple docstring'''
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None
    def copy(self) -> "DownloadConfig":
        """simple docstring"""
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()} )
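# --- Illustrative check (added): `copy()` deep-copies mutable fields, so mutating
# the clone's proxies does not leak back into the original configuration.
_cfg = DownloadConfig(proxies={'https': 'http://localhost:3128'})
_clone = _cfg.copy()
_clone.proxies['https'] = 'http://other:8080'
assert _cfg.proxies['https'] == 'http://localhost:3128'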
| 11 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    '''simple docstring'''
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = """.""" * max(0, spaces - 2) + """# {:""" + str(50 - spaces) + """s}"""
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, """:""", val.size())
    else:
        print(msg, """:""", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    '''simple docstring'''
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
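# --- Illustrative check (added): a shape round-trip on a dummy fused QKV weight
# (3 splits, 2 heads, hidden size 4, one output column). The reordering for
# checkpoint_version 2.0 must preserve the original 2D shape.
_dummy = torch.arange(24.0).reshape(24, 1)
_out = fix_query_key_value_ordering(_dummy, 2.0, 3, 2, 4)
assert _out.shape == _dummy.shape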
def convert_megatron_checkpoint( args , input_state_dict , config ):
'''simple docstring'''
lowercase__ : Union[str, Any] = {}
# old versions did not store training args
lowercase__ : Optional[int] = input_state_dict.get("""args""" , lowercase_ )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowercase__ : Optional[int] = ds_args.padded_vocab_size
lowercase__ : List[str] = ds_args.max_position_embeddings
lowercase__ : Union[str, Any] = ds_args.hidden_size
lowercase__ : Union[str, Any] = ds_args.num_layers
lowercase__ : List[Any] = ds_args.num_attention_heads
lowercase__ : Any = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowercase__ : Tuple = config.n_head
# The hidden_size per head.
lowercase__ : List[str] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowercase__ : str = input_state_dict["""checkpoint_version"""]
else:
lowercase__ : Optional[int] = 0.0
# The model.
lowercase__ : Tuple = input_state_dict["""model"""]
# The language model.
lowercase__ : List[str] = model["""language_model"""]
# The embeddings.
lowercase__ : Dict = lm["""embedding"""]
# The word embeddings.
lowercase__ : Any = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
lowercase__ : List[Any] = word_embeddings[: config.vocab_size, :]
lowercase__ : List[str] = word_embeddings
# The position embeddings.
lowercase__ : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowercase__ : List[str] = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
lowercase__ : Optional[Any] = pos_embeddings
# The transformer.
lowercase__ : List[str] = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
lowercase__ : str = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
lowercase__ : Optional[int] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowercase__ : int = layer_re.match(lowercase_ )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowercase__ : Union[str, Any] = int(m.group(1 ) )
# The name of the operation.
lowercase__ : Optional[Any] = m.group(2 )
# Is it a weight or a bias?
lowercase__ : Optional[int] = m.group(3 )
# The name of the layer.
lowercase__ : List[str] = F'transformer.h.{layer_idx}'
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
lowercase__ : Optional[Any] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
lowercase__ : str = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowercase__ : List[str] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , lowercase_ , lowercase_ )
lowercase__ : Any = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowercase__ : Dict = torch.tensor(-1E4 , dtype=torch.floataa )
lowercase__ : Tuple = masked_bias
lowercase__ : List[Any] = fix_query_key_value_ordering(lowercase_ , lowercase_ , 3 , lowercase_ , lowercase_ )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowercase__ : Optional[int] = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowercase__ : int = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowercase__ : List[str] = fix_query_key_value_ordering(lowercase_ , lowercase_ , 3 , lowercase_ , lowercase_ )
# Store. No change of shape.
lowercase__ : Dict = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowercase__ : List[str] = megatron_to_transformers[op_name]
lowercase__ : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowercase__ : List[str] = megatron_to_transformers[op_name]
lowercase__ : Dict = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowercase__ : Union[str, Any] = transformer["""final_layernorm.weight"""]
lowercase__ : List[Any] = transformer["""final_layernorm.bias"""]
# For LM head, transformers' wants the matrix to weight embeddings.
lowercase__ : Union[str, Any] = word_embeddings
# It should be done!
return output_state_dict
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=lowercase_ , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=lowercase_ , help="""An optional config json file describing the pre-trained model.""" , )
    args = parser.parse_args()
# Extract the basename.
lowercase__ : Union[str, Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
lowercase__ : Union[str, Any] = torch.load(lowercase_ , map_location="""cpu""" )
else:
lowercase__ : int = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
lowercase__ : Optional[int] = input_state_dict.get("""args""" , lowercase_ )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowercase__ : int = """gelu_fast"""
elif ds_args.openai_gelu:
lowercase__ : Tuple = """gelu_new"""
else:
lowercase__ : Union[str, Any] = """gelu"""
else:
# in the very early days this used to be "gelu_new"
lowercase__ : Optional[int] = """gelu_new"""
# Spell out all parameters in case the defaults change.
        lowercase__ : Tuple = GPT2Config(
vocab_size=5_02_57 , n_positions=10_24 , n_embd=10_24 , n_layer=24 , n_head=16 , n_inner=40_96 , activation_function=lowercase_ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=lowercase_ , summary_activation=lowercase_ , summary_proj_to_labels=lowercase_ , summary_first_dropout=0.1 , scale_attn_weights=lowercase_ , use_cache=lowercase_ , bos_token_id=5_02_56 , eos_token_id=5_02_56 , )
else:
        lowercase__ : List[str] = GPT2Config.from_json_file(args.config_file )
lowercase__ : Optional[Any] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
lowercase__ : Dict = convert_megatron_checkpoint(lowercase_ , lowercase_ , lowercase_ )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(lowercase_ , lowercase_ )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowercase__ : Dict = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowercase__ : List[Any] = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
lowercase__ : str = ds_args.tokenizer_name_or_path
else:
raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' )
else:
lowercase__ : Union[str, Any] = """gpt2"""
lowercase__ : List[Any] = AutoTokenizer.from_pretrained(lowercase_ )
lowercase__ : int = type(lowercase_ ).__name__
lowercase__ : str = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(lowercase_ )
# Save tokenizer based on args
print(F'Adding {tokenizer_class} tokenizer files' )
tokenizer.save_pretrained(lowercase_ )
# Store the state_dict to file.
lowercase__ : Optional[Any] = os.path.join(lowercase_ , """pytorch_model.bin""" )
print(F'Saving checkpoint to "{output_checkpoint_file}"' )
torch.save(lowercase_ , lowercase_ )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 12 |
'''simple docstring'''
def different_signs(num_a: int, num_b: int) -> bool:
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
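    # --- Illustrative examples (added): in two's complement, `a ^ b` is negative
    # exactly when the sign bits differ, so the check needs no branching and no
    # multiplication that could overflow in fixed-width languages.
    assert different_signs(1, -1) is True
    assert different_signs(-2, -7) is False
    assert different_signs(0, 5) is False  # zero counts as non-negative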
| 50 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
        cv2.imwrite(f'{file_root}.jpg', new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}')
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'{anno[0]} {x_center} {y_center} {width} {height}'
            annos_list.append(obj)
        with open(f'{file_root}.txt', 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f'{label_name}.jpg')
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list: list, all_annos: list, idxs: list[int], output_size: tuple[int, int], scale_range: tuple[float, float], filter_scale: float = 0.0, ) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 13 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_canine'] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
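
# Hedged usage sketch (an addition, not part of the upstream test file): the
# register-then-load round trip the last two tests exercise, in isolation.
# CustomConfig/CustomImageProcessor are the test_module classes imported above;
# this assumes CustomImageProcessor can be instantiated with default arguments.
def _registration_example():
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    try:
        with tempfile.TemporaryDirectory() as tmp_dir:
            CustomImageProcessor().save_pretrained(tmp_dir)
            # The auto-API now resolves the saved processor to the custom class.
            assert isinstance(AutoImageProcessor.from_pretrained(tmp_dir), CustomImageProcessor)
    finally:
        # Undo the registration so it does not leak into other tests.
        del CONFIG_MAPPING._extra_content["custom"]
        del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]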
| 14 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before it is imported below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
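
# Usage sketch (assumption: the script above lives at utils/print_env.py, as in
# the upstream repository). It is typically run to fill in the environment
# section of a bug report:
#
#     python utils/print_env.py > env_report.txt
#
# The same report can also be captured programmatically:
import subprocess
env_report = subprocess.run([sys.executable, "utils/print_env.py"], capture_output=True, text=True).stdout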
| 50 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)

        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
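
# Minimal inference sketch distilled from the slow tests above (assumes a CUDA
# device and network access to the facebook/DiT-XL-2-256 checkpoint).
def _dit_inference_example():
    generator = torch.manual_seed(0)
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    pipe.to("cuda")
    class_ids = pipe.get_label_ids(["white shark"])
    return pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images[0]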
| 15 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function='gelu_pytorch_tanh',
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
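
# Quick usage sketch of the configuration above: the attribute_map routes the
# generic Transformers names onto the GPT-2-style ones.
def _gpt_bigcode_config_example():
    config = GPTBigCodeConfig(n_embd=768, n_layer=12, n_head=12)
    assert config.hidden_size == config.n_embd
    assert config.num_hidden_layers == config.n_layer
    return config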
| 50 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 5_1_2,
'bert-large-uncased': 5_1_2,
'bert-base-cased': 5_1_2,
'bert-large-cased': 5_1_2,
'bert-base-multilingual-uncased': 5_1_2,
'bert-base-multilingual-cased': 5_1_2,
'bert-base-chinese': 5_1_2,
'bert-base-german-cased': 5_1_2,
'bert-large-uncased-whole-word-masking': 5_1_2,
'bert-large-cased-whole-word-masking': 5_1_2,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2,
'bert-base-cased-finetuned-mrpc': 5_1_2,
'bert-base-german-dbmdz-cased': 5_1_2,
'bert-base-german-dbmdz-uncased': 5_1_2,
'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2,
'wietsedv/bert-base-dutch-cased': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int],
                                             token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
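
# Usage sketch (network access to the bert-base-uncased files assumed): the
# token_type_ids follow create_token_type_ids_from_sequences above -- 0s for the
# first sentence including [CLS]/[SEP], 1s for the second sentence and its [SEP].
def _bert_fast_tokenizer_example():
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    encoded = tokenizer("lowest runner", "unwanted running")
    return encoded["input_ids"], encoded["token_type_ids"]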
| 16 |
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level; the form keeps the
        # midpoint-relative intent explicit.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('image_data/lena_brightness.png', format='png')
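
# Quick sanity check (Pillow only, no image files needed): a solid grey image
# brightened by +50 should read back exactly 50 levels lighter.
def _brightness_sanity_check():
    grey = Image.new("L", (4, 4), color=100)
    assert change_brightness(grey, 50).getpixel((0, 0)) == 150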
| 50 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCAmelCase_ : Tuple = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)

    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only)
    return get_class_in_module(class_name, final_module.replace(".py", ""))
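
# Hedged usage sketch: get_class_from_dynamic_module is what
# DiffusionPipeline.from_pretrained(..., custom_pipeline=...) relies on. The repo
# id below is illustrative -- any Hub repo containing a pipeline.py that defines
# a single DiffusionPipeline subclass would work the same way.
def _load_community_pipeline_class():
    return get_class_from_dynamic_module(
        "hf-internal-testing/diffusers-dummy-pipeline", module_file="pipeline.py"
    )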
| 17 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
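
# Quick demonstration on the first fixture above: the LeetCode example grid
# contains 8 negatives, and all three counters agree.
def _demo():
    sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert (
        count_negatives_binary_search(sample)
        == count_negatives_brute_force(sample)
        == count_negatives_brute_force_with_break(sample)
        == 8
    )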
| 50 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=30 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=10 , _lowerCAmelCase=0.02 , _lowerCAmelCase=None , _lowerCAmelCase=2 , ) -> int:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scope
_lowerCAmelCase = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = num_patches + 1
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ) -> Any:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
_lowerCAmelCase = ViTModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = ViTForMaskedImageModeling(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = ViTForMaskedImageModeling(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
_lowerCAmelCase = self.type_sequence_label_size
_lowerCAmelCase = ViTForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = ViTForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
__lowerCamelCase : List[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__lowerCamelCase : Tuple = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : str = False
__lowerCamelCase : List[Any] = False
__lowerCamelCase : int = False
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = ViTModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _snake_case ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _snake_case ( self ) -> Any:
pass
def _snake_case ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _snake_case ( self ) -> Tuple:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = ViTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def __a():
'''simple docstring'''
_lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> Any:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def _snake_case ( self ) -> Any:
_lowerCAmelCase = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(_lowerCAmelCase )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**_lowerCAmelCase )
# verify the logits
_lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def _snake_case ( self ) -> List[Any]:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_lowerCAmelCase = ViTModel.from_pretrained("facebook/dino-vits8" ).to(_lowerCAmelCase )
_lowerCAmelCase = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" )
_lowerCAmelCase = inputs.pixel_values.to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase )
# verify the logits
_lowerCAmelCase = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _snake_case ( self ) -> int:
        _lowerCAmelCase = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.float16 , device_map="auto" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" )
_lowerCAmelCase = inputs.pixel_values.to(_lowerCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )
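
# Minimal classification sketch distilled from the first integration test above
# (assumes network access to the google/vit-base-patch16-224 checkpoint).
def _vit_classification_example(image):
    processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
    model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]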
| 18 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'


def update_version_in_file(fname, version, pattern):
    with open(fname, 'r', encoding='utf-8', newline='\n') as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION', version)
    code = re_pattern.sub(replace, code)
    with open(fname, 'w', encoding='utf-8', newline='\n') as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if 'research_projects' in directories:
            directories.remove('research_projects')
        if 'legacy' in directories:
            directories.remove('legacy')
        for fname in fnames:
            if fname.endswith('.py'):
                update_version_in_file(os.path.join(folder, fname), version, pattern='examples')


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith('1.'):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/transformers/main/model_doc',
                'https://huggingface.co/docs/transformers/model_doc',
            )
        index += 1

    with open(README_FILE, 'w', encoding='utf-8', newline='\n') as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES['init'], 'r') as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = f'{default_version.major}.{default_version.minor + 1}.0'

    # Now let's ask nicely if that's the right one.
    version = input(f'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(f'Updating version to {version}.')
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(f'Updating version to {version}.')
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
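
# Usage sketch, matching the argparse interface above:
#
#     python utils/release.py                  # pre-release: bump to the next minor version
#     python utils/release.py --patch          # pre-release: bump the micro version instead
#     python utils/release.py --post_release   # after release: move back to an X.Y.0.dev0 version
#
# The version arithmetic in isolation:
_v = packaging.version.parse('4.30.0.dev0')
assert f'{_v.major}.{_v.minor + 1}.0.dev0' == '4.31.0.dev0'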
| 50 | 0 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    """simple docstring"""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
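
# The same adjacency construction in miniature: a triangle whose MST must drop
# the heaviest edge. (Assumes the prisms_algorithm entry point imported above;
# the return value has whatever edge format `mst` produces for the test to check.)
def _mini_prim_example():
    adjacency = defaultdict(list)
    for node1, node2, cost in ([0, 1, 1], [1, 2, 2], [0, 2, 3]):
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    return mst(adjacency)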
| 19 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
                         unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
                         mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars,
                         strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int],
                                             token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
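
# Usage sketch (network access to the squeezebert/squeezebert-uncased files
# assumed); the fast tokenizer mirrors the BERT-style post-processing above.
def _squeezebert_fast_tokenizer_example():
    tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    encoded = tokenizer("lowest runner", "unwanted running")
    return encoded["token_type_ids"]  # 0s for the first segment, 1s for the second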
| 50 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase: List[str] = logging.get_logger(__name__)
_lowerCAmelCase: Any = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class lowercase_ (lowercase__ ):
snake_case ='pix2struct_text_model'
snake_case =['past_key_values']
snake_case ={
'hidden_size': 'hidden_size',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , lowercase_=50244 , lowercase_=768 , lowercase_=64 , lowercase_=2048 , lowercase_=12 , lowercase_=12 , lowercase_=32 , lowercase_=128 , lowercase_=0.1 , lowercase_=1e-6 , lowercase_=1.0 , lowercase_="gelu_new" , lowercase_=0 , lowercase_=False , lowercase_=0 , lowercase_=1 , lowercase_=False , lowercase_=True , **lowercase_ , ) -> str:
a__ =vocab_size
a__ =hidden_size
a__ =d_kv
a__ =d_ff
a__ =num_layers
a__ =num_heads
a__ =relative_attention_num_buckets
a__ =relative_attention_max_distance
a__ =dropout_rate
a__ =layer_norm_epsilon
a__ =initializer_factor
a__ =use_cache
a__ =eos_token_id
a__ =decoder_start_token_id
# for backwards compatibility
a__ =dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def __UpperCamelCase ( cls , lowercase_ , **lowercase_) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_)
a__ , a__ =cls.get_config_dict(lowercase_ , **lowercase_)
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type') == "pix2struct":
a__ =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(lowercase_ , **lowercase_)
class lowercase_ (lowercase__ ):
snake_case ='pix2struct_vision_model'
def __init__( self , lowercase_=768 , lowercase_=768 , lowercase_=2048 , lowercase_=64 , lowercase_=12 , lowercase_=12 , lowercase_="gelu_new" , lowercase_=1e-6 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=1e-10 , lowercase_=1.0 , lowercase_=4096 , lowercase_=32 , lowercase_=128 , **lowercase_ , ) -> Optional[Any]:
super().__init__(**lowercase_)
a__ =hidden_size
a__ =patch_embed_hidden_size
a__ =d_ff
a__ =dropout_rate
a__ =num_hidden_layers
a__ =num_attention_heads
a__ =initializer_range
a__ =initializer_factor
a__ =attention_dropout
a__ =layer_norm_eps
a__ =dense_act_fn
a__ =seq_len
a__ =relative_attention_num_buckets
a__ =relative_attention_max_distance
a__ =d_kv
@classmethod
def __UpperCamelCase ( cls , lowercase_ , **lowercase_) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_)
a__ , a__ =cls.get_config_dict(lowercase_ , **lowercase_)
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type') == "pix2struct":
a__ =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(lowercase_ , **lowercase_)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, initializer_factor=1.0, initializer_range=0.02, is_vqa=False, tie_word_embeddings=False, is_encoder_decoder=True, **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa
    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
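# Usage sketch (added for illustration, not part of the original file): assuming the
# three classes above are exported by transformers, a composite config can be built
# from the two sub-configs. The tiny sizes are illustrative, not real checkpoints.
#
#   from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig
#
#   text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64)
#   vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)
#   config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["text_config"]["num_layers"] == 2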
| 20 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
return False
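# Quick sanity check (added for illustration; not part of the original script):
# CJK ideographs fall inside the ranges above, while Latin letters do not.
if __name__ == "__main__":
    assert _is_chinese_char(ord("中"))
    assert not _is_chinese_char(ord("A"))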
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
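# Worked example (added for illustration, not in the original script): the non-initial
# subword of a segmented whole word gets the "##" prefix used by whole-word masking.
if __name__ == "__main__":
    assert add_sub_symbol(["北", "京", "欢", "迎"], {"北京"}) == ["北", "##京", "欢", "迎"]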
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
| 50 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ["ViTFeatureExtractor"]
UpperCAmelCase_ : Any = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
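# Note (added): _LazyModule defers the heavy torch/TF/Flax imports registered above
# until an attribute such as `ViTModel` is first accessed on the module.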
| 21 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
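# Invocation sketch (added; the path below is a placeholder). Note that despite its
# name, --checkpoint_url is passed to torch.load, so it should point at a local file:
#   python convert_dpt_hybrid_to_pytorch.py --checkpoint_url ./dpt_hybrid-midas.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid --show_prediction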
| 50 | 0 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image):
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 22 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50 | 0 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
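# Example (added for illustration): "12345678Z" is a well-formed DNI because
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z"; a wrong letter fails the checksum.
if __name__ == "__main__":
    assert is_spain_national_id("12345678Z")
    assert not is_spain_national_id("12345678T")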
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
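# Usage sketch (added; hedged): assuming the classes above are exported by
# transformers, an ONNX config with past-key-value support could be built like
# this. The tiny model sizes are illustrative values, not a real checkpoint.
#
#   config = CodeGenConfig(n_layer=2, n_head=4, n_embd=128)
#   onnx_config = CodeGenOnnxConfig(config, task="default", use_past=True)
#   print(onnx_config.inputs)  # OrderedDict of input_ids / past_key_values / attention_mask axes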
| 50 | 0 |
'''simple docstring'''
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        # returns False on an empty queue, mirroring the original behaviour
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
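# Usage example (added for illustration): a capacity-3 ring buffer wraps around
# after a dequeue frees a slot at the front of the backing array.
if __name__ == "__main__":
    q = CircularQueue(3)
    q.enqueue(1).enqueue(2).enqueue(3)
    assert len(q) == 3 and q.first() == 1
    assert q.dequeue() == 1
    q.enqueue(4)  # reuses the freed slot; rear index wraps to 0
    assert [q.dequeue() for _ in range(len(q))] == [2, 3, 4]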
| 24 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50 | 0 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 25 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None):
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
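# Invocation sketch (added; filenames are placeholders): fire exposes the function's
# parameters as CLI arguments, e.g.
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin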
| 50 | 0 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
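# Worked example (added for illustration): on the weighted triangle below, Prim's
# algorithm keeps the two cheapest edges and drops the weight-3 one.
#   adjacency = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]}
#   prisms_algorithm(adjacency)  ->  [(0, 1), (1, 2)]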
| 26 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention", src_files, with_cuda=True, extra_include_paths=[str(root)], extra_cflags=["-DWITH_CUDA=1"], extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 50 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 27 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]):
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 50 | 0 |
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 28 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 50 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
lowerCamelCase_ = sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 50 | 0 |
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
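# Example (added for illustration): the longest run shared by the two strings
# below is "abcd" (length 4), which beats the shared "xyz" (length 3).
assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"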
| 30 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 50 | 0 |
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
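# Worked example (added for illustration): a term appearing 3 times in a document
# and in 1 of 10 corpus documents scores idf = log10(10 / 1) = 1.0, tf-idf = 3.0.
if __name__ == "__main__":
    assert term_frequency("sky", "the sky is blue and the sky is high") == 2
    assert inverse_document_frequency(1, 10) == 1.0
    assert tf_idf(3, 1.0) == 3.0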
| 31 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(test_features), 1)
    return predictions


def main():
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 50 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False
def is_chinese(word: str):
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
UpperCAmelCase_ = parser.parse_args()
main(args)
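# A minimal invocation sketch (hedged: the checkpoint name and paths below are
# illustrative, not from the original script):
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert bert-base-chinese \
#       --save_path ./resources/ref.txt
# Each output line is a JSON list of sub-token positions that fall inside a
# Chinese whole word; whole-word-masking data collators can consume these refs.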
| 32 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
'''simple docstring'''
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper(self):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
    def test_top_p_dist_warper(self):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
    def test_min_length_dist_processor(self):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
    def test_forced_bos_token_logits_processor(self):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
    def test_forced_eos_token_logits_processor(self):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCamelCase__ = 4
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
    def test_processor_list(self):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# with processor list
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
    def test_processor_list_jitted(self):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
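# Hedged usage sketch (not part of the test suite; names mirror the classes
# exercised above, values are illustrative):
#   warpers = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(3)]
#   )
#   scores = warpers(input_ids, scores, cur_len=cur_len)  # same call signature as in the tests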
| 50 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
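# Hedged usage sketch (values illustrative):
#   from transformers import SEWConfig, SEWModel
#   config = SEWConfig(squeeze_factor=2)  # hidden states are downsampled by this factor
#   model = SEWModel(config)
#   ratio = config.inputs_to_logits_ratio  # product of the conv strides, 320 for the defaults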
| 33 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
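# Hedged note: with the lazy structure above, `import transformers` stays cheap;
# a statement like `from transformers import GroupViTModel` only pulls in the
# heavy modeling module on first access.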
| 50 | 0 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
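# Hedged usage sketch (directory layout illustrative): this builder backs
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="./my_clips")
# where class labels are inferred from sub-folder names unless drop_labels is set.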
| 34 |
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """Return True if num1 and num2 have opposite signs (checks the sign bit of the XOR)."""
    return num1 ^ num2 < 0
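# Quick worked examples (hedged, not from the original file):
#   different_signs(1, -1) -> True   (1 ^ -1 == -2, sign bit set)
#   different_signs(3, 7)  -> False  (3 ^ 7 == 4, sign bit clear)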
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace):
    """Instantiate the convert command from parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
if os.path.isdir(self._tfds_path ):
SCREAMING_SNAKE_CASE__ : Any = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
SCREAMING_SNAKE_CASE__ : int = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.abspath(self._datasets_directory )
self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : str = {}
if os.path.isdir(self._tfds_path ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.listdir(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : int = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"""Looking at file {f_name}""" )
SCREAMING_SNAKE_CASE__ : int = os.path.join(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : int = os.path.join(_lowercase , _lowercase )
if not os.path.isfile(_lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_lowercase , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = f.readlines()
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for line in lines:
SCREAMING_SNAKE_CASE__ : Optional[Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
SCREAMING_SNAKE_CASE__ : Any = ''''''
continue
elif "from absl import logging" in out_line:
SCREAMING_SNAKE_CASE__ : Dict = '''from datasets import logging\n'''
elif "getLogger" in out_line:
SCREAMING_SNAKE_CASE__ : Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_highlight = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_highlight) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
else:
for pattern, replacement in TO_CONVERT:
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.sub(_lowercase , _lowercase , _lowercase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
SCREAMING_SNAKE_CASE__ : str = re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _lowercase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
SCREAMING_SNAKE_CASE__ : List[Any] = True
out_lines.append(_lowercase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
SCREAMING_SNAKE_CASE__ : List[Any] = f_name.replace('''.py''' , '''''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(_lowercase , _lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
self._logger.info(f"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowercase )
if needs_manual_update:
with_manual_update.append(_lowercase )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(_lowercase )
self._logger.info(f"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
SCREAMING_SNAKE_CASE__ : Any = os.path.basename(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(f"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(_lowercase , _lowercase )
except KeyError:
self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 35 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : Dict = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = '''sew'''
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
snake_case : Optional[Any] = hidden_size
snake_case : List[Any] = feat_extract_norm
snake_case : List[str] = feat_extract_activation
snake_case : int = list(SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = list(SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = list(SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = conv_bias
snake_case : Any = num_conv_pos_embeddings
snake_case : List[str] = num_conv_pos_embedding_groups
snake_case : Union[str, Any] = len(self.conv_dim )
snake_case : Optional[Any] = num_hidden_layers
snake_case : List[str] = intermediate_size
snake_case : List[Any] = squeeze_factor
snake_case : Dict = hidden_act
snake_case : Tuple = num_attention_heads
snake_case : int = hidden_dropout
snake_case : Tuple = attention_dropout
snake_case : Tuple = activation_dropout
snake_case : List[str] = feat_proj_dropout
snake_case : Tuple = final_dropout
snake_case : Tuple = layerdrop
snake_case : Any = layer_norm_eps
snake_case : Union[str, Any] = initializer_range
snake_case : Optional[int] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case : int = apply_spec_augment
snake_case : Any = mask_time_prob
snake_case : int = mask_time_length
snake_case : Any = mask_time_min_masks
snake_case : List[Any] = mask_feature_prob
snake_case : Dict = mask_feature_length
snake_case : Any = mask_feature_min_masks
# ctc loss
snake_case : List[str] = ctc_loss_reduction
snake_case : Union[str, Any] = ctc_zero_infinity
# sequence classification
snake_case : int = use_weighted_layer_sum
snake_case : List[str] = classifier_proj_size
@property
def snake_case_ ( self ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 36 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCamelCase : int = '3'
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 50 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """A variance-preserving SDE scheduler in the style of score_sde_pytorch."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
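# Hedged usage sketch (`score_model` is a hypothetical score network; shapes,
# the generator, and the step count are illustrative):
#   sched = ScoreSdeVpScheduler(num_train_timesteps=2000)
#   sched.set_timesteps(num_inference_steps=1000)
#   for t in sched.timesteps:
#       score = score_model(x, t)
#       x, x_mean = sched.step_pred(score, x, t, generator=generator)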
| 37 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
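# Hedged usage sketch (values illustrative):
#   from transformers import GPTBigCodeConfig, GPTBigCodeModel
#   config = GPTBigCodeConfig(n_layer=24, multi_query=True)  # one shared K/V head
#   model = GPTBigCodeModel(config)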
| 50 | 0 |
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Check for a pangram with a fixed-size flag array instead of a set."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Check for a pangram with a single set comprehension."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three pangram checks."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 38 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every channel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 50 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        all_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=all_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 39 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number with binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count the negative numbers using binary search on each row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count the negative numbers by scanning every element."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count the negative numbers, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting functions against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
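    # Worked examples (hedged, values illustrative):
    #   count_negatives_binary_search([[3, 2], [1, 0]])    -> 0  (no negatives)
    #   count_negatives_binary_search([[1, -1], [-1, -2]]) -> 3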
| 50 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
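# Hedged note: the ONNX config above exposes a single input, pixel_values with
# dynamic [batch, num_channels, height, width] axes, validated at atol 1e-4 and
# exported with opset 12 by default.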
| 40 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/transformers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
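
def _demo_version_bump():
    # Added hedged mini-demo (illustrative only, independent of the CLI
    # below): how the 'init' pattern above rewrites a version string.
    sample = '__version__ = "4.28.0.dev0"\n'
    re_pattern, replace = REPLACE_PATTERNS["init"]
    return re_pattern.sub(replace.replace("VERSION", "4.28.0"), sample)
    # -> '__version__ = "4.28.0"\n'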
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 50 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
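

# Added hedged sketch (illustrative, not part of the original tests): the
# reproducibility pattern the assertions above rely on — a fixed manual seed
# makes tensor draws, and hence the compared numpy slices, repeatable.
def _demo_seeded_determinism():
    a = torch.randn(3, generator=torch.manual_seed(0))
    b = torch.randn(3, generator=torch.manual_seed(0))
    assert torch.equal(a, b)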
| 41 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
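

def _demo_token_type_ids(cls_id=101, sep_id=102):
    # Added hedged illustration of create_token_type_ids_from_sequences above:
    # segment ids are 0 over "[CLS] A [SEP]" and 1 over "B [SEP]". The ids
    # used here are arbitrary stand-ins, not taken from a real vocabulary.
    token_ids_0, token_ids_1 = [7, 8], [9]
    first = [cls_id] + token_ids_0 + [sep_id]
    second = token_ids_1 + [sep_id]
    return len(first) * [0] + len(second) * [1]


assert _demo_token_type_ids() == [0, 0, 0, 0, 1, 1]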
| 50 | 0 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
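

# Added hedged demo: the hashing above is comment-insensitive, so two sources
# that differ only by a comment line hash identically.
assert _hash_python_lines(["# header comment", "x = 1"]) == _hash_python_lines(["x = 1"])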
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
A_ = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 42 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
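

# Added hedged demo: the CJK range check above in action (U+4E00 is "一").
assert _is_chinese_char(ord("一")) and not _is_chinese_char(ord("A"))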
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_try = min(end - start, max_word_len)
            for i in range(max_try, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
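

# Added hedged demo for add_sub_symbol (toy segmentation, assumed input):
# WordPiece pieces that continue a whole segmented word get the "##" prefix.
assert add_sub_symbol(["身", "高"], {"身高"}) == ["身", "##高"]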
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
| 50 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the fixed 5-node tree used by main() below."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, left subtree, right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree (number of levels)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the data of all nodes at `level`, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the data of all nodes at `level`, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate the direction on every level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()

    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
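

def _sanity_check_traversals():
    # Added hedged quick check (illustrative, not from the original file):
    # the traversals above on the fixed 5-node tree built by make_tree().
    tree = make_tree()
    assert preorder(tree) == [1, 2, 4, 5, 3]
    assert inorder(tree) == [4, 2, 5, 1, 3]
    assert postorder(tree) == [4, 5, 2, 3, 1]
    assert list(level_order(tree)) == [1, 2, 3, 4, 5]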
| 43 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
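

def _demo_qkv_split(hidden_size: int = 2):
    # Added hedged illustration of the slicing above: for hidden size h, rows
    # [0:h] of the fused matrix are the query, [h:2h] the key, [2h:3h] the value.
    in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
        3 * hidden_size, hidden_size
    )
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v]), in_proj_weight)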
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 50 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
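
# Added hedged sketch (illustrative only; `_lazy_attr` is a hypothetical
# helper, not part of this module): _LazyModule defers the real import until
# first attribute access, roughly like this:
import importlib


def _lazy_attr(module_name: str, attr: str):
    return getattr(importlib.import_module(module_name), attr)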
| 44 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
    'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
        'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MvpForCausalLM',
        'MvpForConditionalGeneration',
        'MvpForQuestionAnswering',
        'MvpForSequenceClassification',
        'MvpModel',
        'MvpPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase = "\\n Text data.\n Second line of data."
UpperCamelCase = "file"
@pytest.fixture(scope="""session""" )
def A ( lowercase__ : List[str] ) -> Union[str, Any]:
UpperCamelCase__ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
UpperCamelCase__ :Optional[Any] = bytes(lowercase__ , """utf-8""" )
with zstd.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture
def A ( lowercase__ : str ) -> int:
with open(os.path.join(tmpfs.local_root_dir , lowercase__ ) , """w""" ) as f:
f.write(lowercase__ )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def A ( lowercase__ : Optional[Any] , lowercase__ : Dict , lowercase__ : int , lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : Any ) -> Union[str, Any]:
UpperCamelCase__ :Optional[int] = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
UpperCamelCase__ :List[Any] = input_paths[compression_format]
UpperCamelCase__ :Tuple = tmp_path / """cache"""
UpperCamelCase__ :Dict = DownloadConfig(cache_dir=lowercase__ , extract_compressed_file=lowercase__ )
UpperCamelCase__ :int = cached_path(lowercase__ , download_config=lowercase__ )
with open(lowercase__ ) as f:
UpperCamelCase__ :int = f.read()
with open(lowercase__ ) as f:
UpperCamelCase__ :Tuple = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def A ( lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : Dict , lowercase__ : Any , lowercase__ : List[Any] ) -> List[str]:
UpperCamelCase__ :Dict = """custom_cache"""
UpperCamelCase__ :Union[str, Any] = """custom_extracted_dir"""
UpperCamelCase__ :Optional[int] = tmp_path / """custom_extracted_path"""
if default_extracted:
UpperCamelCase__ :Union[str, Any] = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , lowercase__ )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowercase__ ) )
UpperCamelCase__ :Dict = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
UpperCamelCase__ :Optional[int] = xz_file
UpperCamelCase__ :Optional[Any] = (
DownloadConfig(extract_compressed_file=lowercase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowercase__ )
)
UpperCamelCase__ :str = cached_path(lowercase__ , download_config=lowercase__ )
assert Path(lowercase__ ).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_tmpfs(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 45 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig):
    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
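

def _demo_past_key_values_shape(batch=2, n_head=16, past_len=9, head_dim=256):
    # Added hedged shape check for the dummy past_key_values built above:
    # each layer holds a (key, value) pair of shape
    # (batch, num_heads, past_sequence_length, head_dim).
    import torch

    past = (torch.zeros(batch, n_head, past_len, head_dim),) * 2
    assert past[0].shape == (batch, n_head, past_len, head_dim)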
| 50 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
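

def _demo_offline_mock_pattern():
    # Added hedged standalone illustration of the mocking pattern used above:
    # patch the HTTP session so any request returns a canned 500 response.
    response_mock = mock.Mock()
    response_mock.status_code = 500
    with mock.patch("requests.Session.request", return_value=response_mock) as mocked:
        pass  # code under test would issue requests here
    assert not mocked.called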
| 46 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_xmod': [
        'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XmodConfig',
        'XmodOnnxConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xmod'] = [
        'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XmodForCausalLM',
        'XmodForMaskedLM',
        'XmodForMultipleChoice',
        'XmodForQuestionAnswering',
        'XmodForSequenceClassification',
        'XmodForTokenClassification',
        'XmodModel',
        'XmodPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    # Returns the first non-negative value found under `env_keys`, else `default`.
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
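

def _demo_env_helpers():
    # Added hedged usage demo (the env variable names here are hypothetical):
    os.environ["DEMO_WORLD_SIZE"] = "4"
    assert get_int_from_env(["DEMO_WORLD_SIZE", "WORLD_SIZE"], 1) == 4
    os.environ["DEMO_FLAG"] = "true"
    assert parse_flag_from_env("DEMO_FLAG") is True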
| 47 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : Union[str, None] = None ):
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__lowerCAmelCase , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowerCamelCase__ = v.half()
if save_path is None: # overwrite src_path
lowerCamelCase__ = src_path
torch.save(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
fire.Fire(convert)
| 50 | 0 |
'''simple docstring'''
def twos_complement(number: int) -> str:
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 50 | 0 |
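# torch.utils.cpp_extension.load JIT-compiles the listed C++/CUDA sources into an
# importable extension on first call. The same mechanism in miniature, using an
# inline source (a sketch assuming a working C++ toolchain; the extension and
# function names here are made up):
import torch
from torch.utils.cpp_extension import load_inline

cpp_source = "torch::Tensor double_tensor(torch::Tensor x) { return x * 2; }"
ext = load_inline(name="toy_ext", cpp_sources=cpp_source, functions="double_tensor")
print(ext.double_tensor(torch.ones(3)))  # tensor([2., 2., 2.])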
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor( ProcessorMixin ):

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 49 |
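# Illustrative usage of the processor pattern above: text goes to the tokenizer,
# images to the image processor, and both land in one encoding (downloads a real
# public checkpoint, so a network connection is assumed):
import requests
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'pixel_values']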
'''simple docstring'''
def print_max_activities(start: list[int] , finish: list[int] ) -> None:
    n = len(finish )
    print("""The following activities are selected:""" )
    # The first activity is always selected
    i = 0
    print(i , end=""",""" )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=""",""" )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Union[str, Any] = [1, 3, 0, 5, 8, 5]
UpperCamelCase : int = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 50 | 0 |
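# The greedy scan above is only optimal when activities are pre-sorted by finish
# time, as the sample data already is; with start = [1, 3, 0, 5, 8, 5] and
# finish = [2, 4, 6, 7, 9, 9] it selects indices 0, 1, 3, 4. For unsorted input,
# sort the pairs by finish time first (a small pre-processing sketch that uses
# the function defined above):
start = [1, 12, 3, 5]
finish = [9, 14, 4, 7]
order = sorted(range(len(finish)), key=finish.__getitem__)
start = [start[k] for k in order]
finish = [finish[k] for k in order]
print_max_activities(start, finish)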
'''simple docstring'''
import string
def atbash_slow(sequence: str ) -> str:
    """simple docstring"""
    output = ''''''
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            output += chr(155 - extract )
        elif 97 <= extract <= 122:
            output += chr(219 - extract )
        else:
            output += i
    return output
def atbash(sequence: str ) -> str:
    """simple docstring"""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit

    print('''Running performance benchmarks...''' )
    setup = '''from string import printable ; from __main__ import atbash, atbash_slow'''
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=setup )} seconds" )
    print(f"> atbash(): {timeit('atbash(printable)' , setup=setup )} seconds" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 51 |
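# Atbash maps A<->Z, B<->Y, ... so applying it twice returns the input; a quick
# property check against the two implementations above:
msg = "The quick brown fox"
assert atbash(atbash(msg)) == msg
assert atbash_slow(msg) == atbash(msg)
print(atbash("abc XYZ"))  # zyx CBA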
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer ):
    '''simple docstring'''
    def __init__( self ,args=None ,**kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" ,FutureWarning ,)
        super().__init__(args=args ,**kwargs )
| 50 | 0 |
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    if isinstance(num , float):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''')
    if isinstance(num , str):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''')
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0 , num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52 |
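# Quick checks for the converter above (the sign is carried as a leading "-"):
print(decimal_to_binary(0))    # 0b0
print(decimal_to_binary(40))   # 0b101000
print(decimal_to_binary(-40))  # -0b101000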
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str] ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 50 | 0 |
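# Because the hash above is computed over comment-stripped, non-empty lines, the
# caching key is stable when only comment lines change; a small demonstration
# (uses the function defined above):
a = _hash_python_lines(["# a comment", "x = 1", "print(x)"])
b = _hash_python_lines(["x = 1", "print(x)"])
c = _hash_python_lines(["x = 2", "print(x)"])
print(a == b, a == c)  # True False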
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple=1_3 , lowerCAmelCase_ : Union[str, Any]=1_0 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : List[Any]=3_2 , lowerCAmelCase_ : str=5 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Dict=3_7 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Union[str, Any]=1_0 , lowerCAmelCase_ : Dict=0.02 , lowerCAmelCase_ : Any=0.9 , lowerCAmelCase_ : Union[str, Any]=None , ) -> Optional[int]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = patch_size
__lowerCAmelCase = tubelet_size
__lowerCAmelCase = num_frames
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = mask_ratio
__lowerCAmelCase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__lowerCAmelCase = int(mask_ratio * self.seq_length )
def lowercase ( self : Tuple ) -> List[str]:
__lowerCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase ( self : List[Any] ) -> List[str]:
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowercase ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int ) -> Any:
__lowerCAmelCase = VideoMAEModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> int:
__lowerCAmelCase = VideoMAEForPreTraining(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowerCAmelCase = torch.ones((self.num_masks,) )
__lowerCAmelCase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
__lowerCAmelCase = mask.expand(self.batch_size , -1 ).bool()
__lowerCAmelCase = model(lowerCAmelCase_ , lowerCAmelCase_ )
# model only returns predictions for masked patches
__lowerCAmelCase = mask.sum().item()
__lowerCAmelCase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def lowercase ( self : Optional[int] ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a_ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
a_ = (
{"""feature-extraction""": VideoMAEModel, """video-classification""": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : List[Any] ) -> Any:
__lowerCAmelCase = VideoMAEModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7 )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any]=False ) -> Any:
__lowerCAmelCase = copy.deepcopy(lowerCAmelCase_ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
__lowerCAmelCase = torch.ones((self.model_tester.num_masks,) )
__lowerCAmelCase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
__lowerCAmelCase = mask.expand(self.model_tester.batch_size , -1 ).bool()
__lowerCAmelCase = bool_masked_pos.to(lowerCAmelCase_ )
if return_labels:
if model_class in [
*get_values(lowerCAmelCase_ ),
]:
__lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def lowercase ( self : Any ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def lowercase ( self : Union[str, Any] ) -> str:
pass
def lowercase ( self : Optional[int] ) -> List[str]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def lowercase ( self : Optional[int] ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowercase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Any ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ )
@slow
def lowercase ( self : Any ) -> Tuple:
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = VideoMAEModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowercase ( self : Union[str, Any] ) -> Dict:
if not self.has_attentions:
pass
else:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
__lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
__lowerCAmelCase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__lowerCAmelCase = len(lowerCAmelCase_ )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowercase ( self : List[Any] ) -> str:
def check_hidden_states_output(lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
__lowerCAmelCase = self.model_tester.seq_length - self.model_tester.num_masks
__lowerCAmelCase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase ( self : Tuple ) -> Optional[Any]:
pass
def a_ ( ):
__lowerCAmelCase = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset' )
__lowerCAmelCase = np.load(lowerCAmelCase_ )
return list(lowerCAmelCase_ )
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : str ) -> int:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase ( self : str ) -> int:
__lowerCAmelCase = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
lowerCAmelCase_ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : List[Any] ) -> Optional[int]:
__lowerCAmelCase = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(lowerCAmelCase_ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(lowerCAmelCase_ , return_tensors='pt' ).to(lowerCAmelCase_ )
# add boolean mask, indicating which patches to mask
__lowerCAmelCase = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
__lowerCAmelCase = torch.load(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__lowerCAmelCase = torch.tensor(
[[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=lowerCAmelCase_ )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
__lowerCAmelCase = torch.tensor([0.51_42] , device=lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
__lowerCAmelCase = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=lowerCAmelCase_ ).to(
lowerCAmelCase_ )
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(torch.tensor([0.64_69] ) , device=lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
| 53 |
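# The sequence-length bookkeeping in the tester above, spelled out with its
# default hyperparameters (image_size=10, patch_size=2, num_frames=2,
# tubelet_size=2, mask_ratio=0.9):
image_size, patch_size, num_frames, tubelet_size, mask_ratio = 10, 2, 2, 2, 0.9
num_patches_per_frame = (image_size // patch_size) ** 2            # 25
seq_length = (num_frames // tubelet_size) * num_patches_per_frame  # 25
num_masks = int(mask_ratio * seq_length)                           # 22
print(num_patches_per_frame, seq_length, num_masks)  # 25 25 22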
'''simple docstring'''
import operator
def strand_sort(arr: list , reverse: bool = False , solution: list | None = None ) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 50 | 0 |
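# Strand sort repeatedly peels an ascending "strand" off the input and merges it
# into the solution list; the worst case (reverse-ordered input) costs O(n^2).
# Note that the function consumes `arr` in place, so pass a copy to keep the
# original:
data = [3, 1, 4, 1, 5, 9, 2, 6]
print(strand_sort(list(data)))                # [1, 1, 2, 3, 4, 5, 6, 9]
print(strand_sort(list(data), reverse=True))  # [9, 6, 5, 4, 3, 2, 1, 1]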
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class A ( unittest.TestCase ):
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
        logits = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
], # cummulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.float32 , )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float("inf" )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float("inf" ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1e-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
@require_tf
class TFGenerationIntegrationTests( unittest.TestCase , GenerationIntegrationTestsMixin ):
# setting framework_dependent_parameters needs to be gated, just like its contents' imports
if is_tf_available():
_snake_case ={
'''AutoModelForCausalLM''': TFAutoModelForCausalLM,
'''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq,
'''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM,
'''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq,
'''LogitsProcessorList''': TFLogitsProcessorList,
'''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor,
'''create_tensor_fn''': tf.convert_to_tensor,
'''floats_tensor''': floats_tensor,
'''return_tensors''': '''tf''',
}
@slow
    def lowerCAmelCase__ ( self: Tuple ) -> Optional[Any]:
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        input_length = 2
        max_new_tokens = 2

        class DummyModel( tf.Module ):
            def __init__( self: Union[str, Any] , model: Any ) -> Optional[int]:
                '''simple docstring'''
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name="attention_mask" ),
                ) , jit_compile=True , )
            def serving( self: Any , input_ids: Union[str, Any] , attention_mask: str ) -> Union[str, Any]:
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size] ),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )["sequences"]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
    def lowerCAmelCase__ ( self: str ) -> Optional[int]:
        '''simple docstring'''
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        batch_size = 1
        max_new_tokens = 2

        class DummyModel( tf.Module ):
            def __init__( self: Optional[int] , model: Any ) -> Optional[Any]:
                '''simple docstring'''
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="attention_mask" ),
                ) , jit_compile=True , )
            def serving( self: Optional[Any] , input_ids: Any , attention_mask: List[str] ) -> Dict:
                '''simple docstring'''
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]] ),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )["sequences"]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=_lowerCAmelCase )
class A ( tf.keras.layers.Layer ):
def __init__( self: List[str] ) -> List[str]:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ =text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_lowerCAmelCase , "spiece.model" ) , "rb" ).read() )
UpperCAmelCase_ =TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: Optional[int] , *_lowerCAmelCase: List[str] , **_lowerCAmelCase: Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =text.pad_model_inputs(
_lowerCAmelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
UpperCAmelCase_ =self.model.generate(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase )
return self.tokenizer.detokenize(_lowerCAmelCase )
UpperCAmelCase_ =CompleteSentenceTransformer()
UpperCAmelCase_ =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
UpperCAmelCase_ =complete_model(_lowerCAmelCase )
UpperCAmelCase_ =tf.keras.Model(_lowerCAmelCase , _lowerCAmelCase )
keras_model.save(_lowerCAmelCase )
def lowerCAmelCase__ ( self: List[str] ) -> str:
'''simple docstring'''
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        sentence = "Hello, my dog is cute and"
        input_ids = tokenizer(sentence , return_tensors="tf" )
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**input_ids , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [638, 198]
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**input_ids , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCAmelCase__ ( self: List[Any] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
UpperCAmelCase_ ="Hugging Face is a technology company based in New York and Paris."
UpperCAmelCase_ =bart_tokenizer(_lowerCAmelCase , return_tensors="tf" ).input_ids
UpperCAmelCase_ =TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
UpperCAmelCase_ =bart_model.generate(_lowerCAmelCase ).numpy()
        class FakeBart( TFBartForConditionalGeneration ):
            def call( self: List[str] , input_ids: Optional[Any] , foo: Union[str, Any]=None , **kwargs: List[Any] ) -> str:
                '''simple docstring'''
                return super().call(input_ids , **kwargs )
        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
        output_with_kwargs = bart_model.generate(input_ids , foo="bar" ).numpy()
        self.assertTrue(np.array_equal(output , output_with_kwargs ) )
        class FakeEncoder( bart_model.model.encoder.__class__ ):
            def call( self: int , input_ids: Any , **kwargs: Tuple ) -> int:
                '''simple docstring'''
                return super().call(input_ids , **kwargs )
        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo="bar" )
| 54 |
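# The filtering exercised in the first test above keeps the k highest logits and
# the smallest token set whose cumulative probability exceeds p, masking the rest
# with -inf. A framework-free sketch of the same idea in numpy (illustrative, not
# the transformers implementation):
import numpy as np

def top_k_top_p_filter(logits, top_k=0, top_p=1.0):
    logits = logits.copy()
    if top_k > 0:
        kth_largest = np.sort(logits)[-top_k]
        logits[logits < kth_largest] = -np.inf
    if top_p < 1.0:
        order = np.argsort(logits)[::-1]
        probs = np.exp(logits[order] - logits[order].max())
        probs /= probs.sum()
        keep = np.searchsorted(np.cumsum(probs), top_p) + 1
        logits[order[keep:]] = -np.inf
    return logits

print(top_k_top_p_filter(np.array([1.0, 3.0, 2.0, 0.5]), top_k=2))
# [-inf   3.   2. -inf]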
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict ):
    return (data["data"], data["target"])
def xgboost(features: np.ndarray , target: np.ndarray , test_features: np.ndarray ):
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main():
    housing = fetch_california_housing()
    data , target = data_handling(housing )
    x_train , x_test , y_train , y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F'''Mean Absolute Error : {mean_absolute_error(y_test , predictions )}''' )
    print(F'''Mean Square Error : {mean_squared_error(y_test , predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 50 | 0 |
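# Mean squared error is in squared target units; the root is often easier to
# read. A small add-on (illustrative):
import numpy as np
from sklearn.metrics import mean_squared_error

y_true = np.array([2.0, 3.5, 4.0])
y_pred = np.array([2.5, 3.0, 4.5])
print(np.sqrt(mean_squared_error(y_true, y_pred)))  # 0.5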
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE :Optional[Any] = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
SCREAMING_SNAKE_CASE :Tuple = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
SCREAMING_SNAKE_CASE :Tuple = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds , labels ):
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_f1(preds , labels ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds , labels ):
    """simple docstring"""
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32" ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format="numpy" ,)
    def _compute( self ,predictions ,references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references ,predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions ,references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions ,references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions ,references )}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
                "\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]" )
| 55 |
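# Each branch of _compute above wraps a plain sklearn/scipy call; for example the
# "cola" branch reduces to Matthews correlation on raw label lists:
from sklearn.metrics import matthews_corrcoef
print(matthews_corrcoef([0, 1, 1, 0], [0, 1, 0, 0]))  # ~0.577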
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits( self ,batch_size ,length ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCamelCase__ = 4
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# with processor list
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
| 50 | 0 |
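# Temperature warping, checked by the first test above, divides logits by T
# before the softmax: T < 1 sharpens the distribution, T > 1 flattens it. A quick
# numpy illustration of that effect:
import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.5])
print(softmax(logits))        # baseline
print(softmax(logits / 0.5))  # sharper: the max probability rises
print(softmax(logits / 1.3))  # smoother: the max probability falls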
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], UpperCamelCase, module_spec=__spec__)
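# --- Illustration (added; hypothetical helper names): the try/except
# --- OptionalDependencyNotAvailable dance above boils down to this pattern -
# --- probe the optional backend once, and fail with an actionable message only
# --- when the guarded feature is actually used.
import importlib.util

def require_backend(name: str) -> None:
    if importlib.util.find_spec(name) is None:
        raise ImportError(f"{name} is required for this feature; try `pip install {name}`")

def mean_pool_torch(hidden_states):
    require_backend("torch")  # deferred check: importing this module never needs torch
    import torch
    return torch.mean(hidden_states, dim=1)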
| 50 | 0 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , vocab_size=1_0_0 , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.0_2 , num_labels=3 , ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values, labels
    def create_and_check_model( self , config , pixel_values , labels ):
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels ):
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    def setUp( self ):
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' )
            outputs = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
            self.assertIsNotNone(outputs )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head( self ):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='np' ).pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 1_9_6) , dtype=bool )
        # forward pass
        outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_9_6, 8_1_9_2)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array(
            [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1e-2 ) )
    @slow
    def test_inference_image_classification_head_imagenet_1k( self ):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='np' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
        expected_class_idx = 2_8_1
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
    @slow
    def test_inference_image_classification_head_imagenet_22k( self ):
        model = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='np' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 2_1_8_4_1)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
        expected_class_idx = 2_3_9_6
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
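# --- Usage sketch (added): the integration path the tests above exercise, end to
# --- end. Downloads real weights (checkpoint name taken from the tests); the input
# --- is a synthetic stand-in image so no local files are needed.
if __name__ == "__main__":
    from transformers import BeitImageProcessor, FlaxBeitForImageClassification

    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    demo_image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # stand-in image
    inputs = processor(images=demo_image, return_tensors="np")
    logits = model(**inputs).logits
    print("predicted class id:", int(logits.argmax(-1)))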
| 57 |
'''simple docstring'''
def different_signs( num1 : int , num2 : int ) -> bool:
    # the XOR of two ints is negative exactly when their sign bits differ
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
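# --- Worked examples (added): the sign bit of num1 ^ num2 is set exactly when the
# --- operands' sign bits differ, so the XOR is negative iff the signs differ.
assert different_signs(1, -2) is True      # 1 ^ -2 == -1 < 0
assert different_signs(-7, -3) is False    # -7 ^ -3 == 4 >= 0
assert different_signs(5, 9) is False      # 5 ^ 9 == 12 >= 0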
| 50 | 0 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # hush TensorFlow C++ logging if TF is present (this is what the os import is for)
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
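# --- Variant sketch (added; hypothetical helper, not part of the script above): the
# --- same probes collected into a dict, handy for attaching to bug reports.
import importlib

def collect_env() -> dict:
    info = {"python": sys.version, "platform": platform.platform()}
    for pkg in ("torch", "transformers"):
        try:
            info[pkg] = importlib.import_module(pkg).__version__
        except ImportError:
            info[pkg] = None
    return info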
| 58 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase : Union[str, Any] = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCamelCase['modeling_canine'] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], UpperCamelCase, module_spec=__spec__)
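# --- Illustration (added; standalone module sketch): why the imports above are
# --- repeated under TYPE_CHECKING - static checkers resolve the symbols, while
# --- runtime pays no import cost thanks to postponed evaluation of annotations.
from __future__ import annotations
from typing import TYPE_CHECKING

if TYPE_CHECKING:  # this branch never executes at runtime
    from decimal import Decimal

def as_float(x: Decimal) -> float:  # the annotation stays a string until inspected
    return float(x)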
| 50 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __A["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __A["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __A["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], __A, module_spec=__spec__)
| 59 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging before the TF import below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 50 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    lowerCAmelCase_['''modeling_instructblip'''] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], lowerCAmelCase_, module_spec=__spec__)
| 60 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class UpperCamelCase__ (PretrainedConfig ):
    '''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self ,vocab_size=5_02_57 ,n_positions=10_24 ,n_embd=7_68 ,n_layer=12 ,n_head=12 ,n_inner=None ,activation_function="gelu_pytorch_tanh" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,scale_attn_weights=True ,use_cache=True ,bos_token_id=5_02_56 ,eos_token_id=5_02_56 ,attention_softmax_in_fp32=True ,scale_attention_softmax_in_fp32=True ,multi_query=True ,**kwargs ,):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
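# --- Usage sketch (added): attribute_map aliases the canonical Transformers names
# --- onto the GPT-2-style fields in both directions, so generic code can read and
# --- write hidden_size while the config stores n_embd. (Assumes transformers is
# --- installed; UpperCamelCase__ is the config class defined above.)
if __name__ == "__main__":
    config = UpperCamelCase__(n_embd=512, n_layer=6, n_head=8)
    assert config.hidden_size == 512        # read resolved through attribute_map
    assert config.num_hidden_layers == 6
    config.hidden_size = 1024               # write goes through to n_embd as well
    assert config.n_embd == 1024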
| 50 | 0 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def fetch_jobs( location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
        job_title = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
        company_name = job.find("span" , {"class": "company"} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 61 |
'''simple docstring'''
from PIL import Image
def change_brightness( img : Image.Image , level : float ) -> Image.Image:
    def brightness( c : int ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
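# --- Self-contained check (added): a uniform grey image avoids needing lena.jpg.
# --- point() applies brightness() to every pixel value c, i.e. c -> c + level.
demo = Image.new("L", (4, 4), color=100)
brighter = change_brightness(demo, 50)
assert brighter.getpixel((0, 0)) == 150  # 128 + 50 + (100 - 128)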
| 50 | 0 |
def nor_gate( input_1 , input_2 ):
    """simple docstring"""
    return int(input_1 == input_2 == 0 )
def main( ):
"""simple docstring"""
print("Truth Table of NOR Gate:" )
print("| Input 1 | Input 2 | Output |" )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
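# --- Exhaustive sketch (added): the arithmetic definition above matches boolean
# --- NOR, i.e. nor_gate(a, b) == NOT (a OR b), over all four input pairs.
from itertools import product
for a, b in product((0, 1), repeat=2):
    assert nor_gate(a, b) == int(not (a or b))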
| 62 |
'''simple docstring'''
def generate_large_matrix( ):
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid( grid : list[list[int]] ):
    # every row and every column must be sorted in non-increasing order
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index( array : list[int] ):
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search( grid : list[list[int]] ):
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force( grid : list[list[int]] ):
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break( grid : list[list[int]] ):
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark( ):
    from timeit import timeit
    print("""Running benchmarks""" )
    setup = (
        """from __main__ import count_negatives_binary_search, """
        """count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(F'''{func}(grid=grid)''' , setup=setup , number=500 )
        print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
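# --- Consistency sketch (added): all three strategies must agree; only the small
# --- fixture grids are checked (the last test_grids entry is the 1000x1000 grid).
for sample in test_grids[:-1]:
    expected = count_negatives_brute_force(sample)
    assert count_negatives_binary_search(sample) == expected
    assert count_negatives_brute_force_with_break(sample) == expected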
| 50 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        vocab = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(mock_tokenizer_config , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id( self ):
        token = """</s>"""
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 9 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def test_tokenizer_equivalence_en_de( self ):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
        batch = en_de_tokenizer(["""I am a small frog"""] , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected , batch.input_ids[0] )
        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir )
        contents = [x.name for x in Path(save_dir ).glob("""*""" )]
        self.assertIn("""source.spm""" , contents )
        MarianTokenizer.from_pretrained(save_dir )
    def test_outputs_not_longer_than_maxlen( self ):
        tok = self.get_tokenizer()
        batch = tok(
            ["""I am a small frog""" * 1000, """I am a small frog"""] , padding=True , truncation=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
    def test_outputs_can_be_shorter( self ):
        tok = self.get_tokenizer()
        batch_smaller = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch_smaller , BatchEncoding )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
__UpperCAmelCase : Optional[int] = {"""input_ids""": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def test_separate_vocabs( self ):
        tokenizer = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        source_text = """Tämä on testi"""
        target_text = """This is a test"""
        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]
        input_ids = tokenizer(source_text ).input_ids
        self.assertListEqual(expected_src_ids , input_ids )
        target_input_ids = tokenizer(text_target=target_text ).input_ids
        self.assertListEqual(expected_target_ids , target_input_ids )
        decoded = tokenizer.decode(target_input_ids , skip_special_tokens=True )
        self.assertEqual(decoded , target_text )
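# --- Usage sketch (added): driving MarianTokenizer against a public checkpoint;
# --- text_target routes targets through target.spm and yields labels. Requires a
# --- network download and torch, so it is guarded under __main__.
if __name__ == "__main__":
    tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    batch = tok(["I am a small frog"], text_target=["Ich bin ein kleiner Frosch"], return_tensors="pt")
    print(batch.input_ids.shape, batch.labels.shape)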
| 63 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
UpperCamelCase : List[Any] = 'examples/'
UpperCamelCase : int = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
UpperCamelCase : Any = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
UpperCamelCase : Any = 'README.md'
def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ):
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.read()
lowerCamelCase__ , lowerCamelCase__ = REPLACE_PATTERNS[pattern]
lowerCamelCase__ = replace.replace("""VERSION""" , __lowerCAmelCase )
lowerCamelCase__ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : str ):
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" )
def A__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def A__ ( ):
lowerCamelCase__ = """🤗 Transformers currently provides the following architectures"""
lowerCamelCase__ = """1. Want to contribute a new model?"""
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCamelCase__ = f.readlines()
# Find the start of the list.
lowerCamelCase__ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase__ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
lowerCamelCase__ = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__lowerCAmelCase )
def A__ ( ):
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
lowerCamelCase__ = f.read()
lowerCamelCase__ = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def A__ ( __lowerCAmelCase : Union[str, Any]=False ):
lowerCamelCase__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
lowerCamelCase__ = default_version.base_version
elif patch:
lowerCamelCase__ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowerCamelCase__ = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowerCamelCase__ = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__lowerCAmelCase ) == 0:
lowerCamelCase__ = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def A__ ( ):
lowerCamelCase__ = get_version()
lowerCamelCase__ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowerCamelCase__ = current_version.base_version
# Check with the user we got that right.
lowerCamelCase__ = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__lowerCAmelCase ) == 0:
lowerCamelCase__ = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
UpperCamelCase : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 50 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowercase_ : Tuple = logging.get_logger(__name__)
class _lowerCamelCase ( SequenceFeatureExtractor ):
    model_input_names = ["audio_values", "audio_mask"]
    def __init__( self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs , ) -> None:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ).T
    def _np_extract_fbank_features( self , waveform ) -> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
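# --- Standalone sketch (added): the same log-mel pipeline as _np_extract_fbank_features,
# --- driven directly through transformers.audio_utils on a synthetic one-second sine
# --- wave; parameter values mirror the defaults above (assumes transformers installed).
if __name__ == "__main__":
    import numpy as np
    from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

    sr, n_fft, n_mels = 44100, 2048, 128
    wave = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)
    filters = mel_filter_bank(
        num_frequency_bins=1 + n_fft // 2, num_mel_filters=n_mels, min_frequency=0.0,
        max_frequency=22050.0, sampling_rate=sr, norm="slaney", mel_scale="slaney",
    )
    log_mel = spectrogram(
        wave, window_function(n_fft, "hann"), frame_length=n_fft, hop_length=sr // 86,
        power=2.0, mel_filters=filters, log_mel="dB", db_range=80.0,
    )
    print(log_mel.shape)  # (num mel filters, num frames)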
| 64 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase : int = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase : Tuple = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
UpperCamelCase : Dict = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = VOCAB_FILES_NAMES
_UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase = SqueezeBertTokenizer
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
super().__init__(
_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)
lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
):
lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
lowerCamelCase__ = do_lower_case
lowerCamelCase__ = strip_accents
lowerCamelCase__ = tokenize_chinese_chars
lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )
lowerCamelCase__ = do_lower_case
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = [self.sep_token_id]
lowerCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 50 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = """gelu"""
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_labels=False ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=20 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size )
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 )
        input_ids = np.concatenate([input_ids, eos_tensor] ,axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
        inputs_dict = prepare_pegasus_inputs_dict(config ,input_ids ,decoder_input_ids )
        return config, inputs_dict
    def check_use_cache_forward( self ,model_class_name ,config ,inputs_dict ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] ,max_decoder_length ,encoder_outputs )
        decoder_attention_mask_cache = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] ,encoder_outputs ,decoder_attention_mask=decoder_attention_mask_cache ,past_key_values=past_key_values ,decoder_position_ids=decoder_position_ids ,)
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] ,encoder_outputs ,decoder_attention_mask=decoder_attention_mask_cache ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=decoder_position_ids ,)
        outputs = model.decode(decoder_input_ids ,encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
    def check_use_cache_forward_with_attn_mask( self ,model_class_name ,config ,inputs_dict ):
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] ,axis=-1 ,)
        past_key_values = model.init_cache(decoder_input_ids.shape[0] ,max_decoder_length ,encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] ,encoder_outputs ,decoder_attention_mask=decoder_attention_mask_cache ,past_key_values=past_key_values ,decoder_position_ids=decoder_position_ids ,)
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] ,encoder_outputs ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=decoder_attention_mask_cache ,decoder_position_ids=decoder_position_ids ,)
        outputs = model.decode(decoder_input_ids ,encoder_outputs ,decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 ,msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict( config ,input_ids ,decoder_input_ids ,attention_mask=None ,decoder_attention_mask=None ,):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids ,config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape ,dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.int8 ),
            ] ,axis=-1 ,)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxPegasusModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=PegasusConfig )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_use_cache_forward( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class ,config ,inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class ,config ,inputs_dict )
    def test_encode( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict ,model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids ,attention_mask=None ,**kwargs ):
                    return model.encode(input_ids=input_ids ,attention_mask=attention_mask )

                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) ,len(outputs ) )
                for jitted_output, output in zip(jitted_outputs ,outputs ):
                    self.assertEqual(jitted_output.shape ,output.shape )
    def test_decode( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids ,decoder_attention_mask ,encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids ,decoder_attention_mask=decoder_attention_mask ,encoder_outputs=encoder_outputs ,)

                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) ,len(outputs ) )
                for jitted_output, output in zip(jitted_outputs ,outputs ):
                    self.assertEqual(jitted_output.shape ,output.shape )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=True )
            input_ids = np.ones((1, 1) )
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
@slow
    def test_pegasus_xsum_summary( self ):
        '''simple docstring'''
        model = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
        tokenizer = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text , return_tensors="""np""" , truncation=True , max_length=512 , padding=True )
        translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
        decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
        assert tgt_text == decoded
| 65 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0x4_e_0_0 and cp <= 0x9_f_f_f)
or (cp >= 0x3_4_0_0 and cp <= 0x4_d_b_f) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_a_6_d_f) #
or (cp >= 0x2_a_7_0_0 and cp <= 0x2_b_7_3_f) #
or (cp >= 0x2_b_7_4_0 and cp <= 0x2_b_8_1_f) #
or (cp >= 0x2_b_8_2_0 and cp <= 0x2_c_e_a_f) #
or (cp >= 0xf_9_0_0 and cp <= 0xf_a_f_f)
or (cp >= 0x2_f_8_0_0 and cp <= 0x2_f_a_1_f) #
): #
return True
return False
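# Example: _is_chinese_char(ord("中")) is True (U+4E2D sits in the main CJK block), while
# _is_chinese_char(ord("a")) is False.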
def is_chinese(word: str ):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
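# Example: is_chinese("身高") returns 1, but is_chinese("180") returns 0, since every
# character must fall inside the CJK ranges checked above.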
def get_chinese_word(tokens: List[str] ):
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol(bert_tokens: List[str] , chinese_word_set: set ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = """""".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = """##""" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
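# Example: with bert_word ["身", "高", "很", "高"] and chinese_word_set {"身高"}, the list
# becomes ["身", "##高", "很", "高"], matching BERT's whole-word-masking "##" convention.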
def prepare_ref(lines: List[str] , ltp_tokenizer: LTP , bert_tokenizer: BertTokenizer ):
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res["""input_ids"""] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main(args ):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
        data = [json.dumps(ref ) + """\n""" for ref in ref_ids]
        f.writelines(data )
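# Each line written to save_path is a JSON list holding, for one input line, the positions of
# the "##"-continued Chinese sub-tokens that whole-word masking should treat as one word.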
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
| 50 | 0 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    def get_tokenizer( self , mname ):
        return FSMTTokenizer.from_pretrained(mname )

    def get_model( self , mname ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = F"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']
        batch = tokenizer(src_sentences , return_tensors='pt' , truncation=True , padding='longest' ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores['bleu'] , min_bleu_score )
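    # The BLEU values in the parameterized list act as regression floors rather than exact
    # targets, which is why assertGreaterEqual is used instead of an equality check.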
| 66 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url ):
    config = DPTConfig(embedding_type="""hybrid""" )
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = """project"""
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict ):
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""" , """dpt.encoder""" )
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""" , """dpt.embeddings""" )
    if "patch_embed" in name:
        name = name.replace("""patch_embed""" , """""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """position_embeddings""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""" , """projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layer""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "norm1" in name and "backbone" not in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name and "backbone" not in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""" , """head""" )
    if "scratch" in name:
        name = name.replace("""scratch""" , """neck""" )
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""" , """convs.0""" )
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""" , """convs.1""" )
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""" , """convs.2""" )
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""" , """convs.3""" )
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
    if "out_conv" in name:
        name = name.replace("""out_conv""" , """projection""" )
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""" , """residual_layer1""" )
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""" , """residual_layer2""" )
    if "conv1" in name:
        name = name.replace("""conv1""" , """convolution1""" )
    if "conv2" in name:
        name = name.replace("""conv2""" , """convolution2""" )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
    if "pretrained" in name:
        name = name.replace("""pretrained""" , """dpt""" )
    if "bn" in name:
        name = name.replace("""bn""" , """batch_norm""" )
    if "head" in name:
        name = name.replace("""head""" , """head.head""" )
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""" , """layernorm""" )
    if "auxlayer" in name:
        name = name.replace("""auxlayer""" , """auxiliary_head.head""" )
    if "backbone" in name:
        name = name.replace("""backbone""" , """backbone.bit.encoder""" )
    if ".." in name:
        name = name.replace("""..""" , """.""" )
    if "stem.conv" in name:
        name = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """layers""" )
    if "convolution" in name and "backbone" in name:
        name = name.replace("""convolution""" , """conv""" )
    if "layer" in name and "backbone" in name:
        name = name.replace("""layer""" , """layers""" )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
    if "embedder.conv" in name:
        name = name.replace("""embedder.conv""" , """embedder.convolution""" )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
    return name
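# Example: rename_key("pretrained.model.blocks.0.attn.proj.weight") walks through the rules
# above and comes out as "dpt.encoder.layer.0.attention.output.dense.weight".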
def read_in_q_k_v(state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
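# The timm checkpoint stores query, key and value as one fused (3 * hidden_size, hidden_size)
# projection; the three slices above split it into the separate projections HF expects.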
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location="""cpu""" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if """ade""" in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="""pt""" )
    # forward pass
    outputs = model(**encoding ).logits if """ade""" in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
        image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 50 | 0 |
from itertools import permutations
def is_substring_divisible(num: tuple ) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
return True
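# Example: (1, 4, 0, 6, 3, 5, 7, 2, 8, 9), i.e. 1406357289, passes every check above; it is
# one of the known substring-divisible pandigital numbers from Project Euler problem 43.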
def solution(n: int = 10 ) -> int:
    return sum(
        int(''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 67 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'codegen'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=50400 , n_positions=2048 , n_ctx=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig(OnnxConfigWithPast ):
    '''simple docstring'''
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ):
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers( self ):
        return self._config.n_layer

    @property
    def num_attention_heads( self ):
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
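    # ONNX export traces model inputs positionally, so the dummy inputs above must follow the
    # exact argument order of the model's forward() signature.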
    @property
    def default_onnx_opset( self ):
        return 13
| 50 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Dict = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class LiltConfig(PretrainedConfig ):
    model_type = """lilt"""

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_2d_position_embeddings=1_024 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 69 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xmod': [
'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XmodConfig',
'XmodOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST',
'XmodForCausalLM',
'XmodForMaskedLM',
'XmodForMultipleChoice',
'XmodForQuestionAnswering',
'XmodForSequenceClassification',
'XmodForTokenClassification',
'XmodModel',
'XmodPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
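# Example: trim_batch(tensor([[5, 8, 0], [7, 0, 0]]), pad_token_id=0) keeps only the first
# two columns, since the last column is padding in every row of the batch.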
class Seq2SeqDataset(Dataset ):
    '''simple docstring'''
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        """simple docstring"""
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '.source' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '.target' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self : int ) -> int:
"""simple docstring"""
return len(self.src_lens )
    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('\n' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('\n' )
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , 'right' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , 'right' )
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file ):
        """simple docstring"""
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        input_ids = torch.stack([x['input_ids'] for x in batch] )
        masks = torch.stack([x['attention_mask'] for x in batch] )
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List] ):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info(folder_path: str ) -> None:
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , 'git_log.json' ) )
def save_json(content , path , indent=4 , **json_dump_kwargs ):
    '''simple docstring'''
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ):
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lmap(f: Callable , x: Iterable ) -> List:
    '''simple docstring'''
    return list(map(f , x ) )
def pickle_save(obj , path ):
    '''simple docstring'''
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s ):
    '''simple docstring'''
    def remove_articles(text ):
        return re.sub(r'\b(a|an|the)\b' , ' ' , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction , ground_truth ):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction , ground_truth ):
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns: List[str] , reference_lns: List[str] ):
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix ):
    '''simple docstring'''
    return model_prefix.startswith('rag' )
def set_extra_model_params(extra_params , hparams , config ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 70 |
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
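# Invoked through fire.Fire below, so a typical call looks like (script name assumed):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model_fp16.bin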
if __name__ == "__main__":
fire.Fire(convert)
| 50 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int ) -> typing.Counter[int]:
    """simple docstring"""
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 10_00 ) -> int:
    """simple docstring"""
    triplets = pythagorean_triple(n )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 71 |
'''simple docstring'''
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
    src_files = [
        root / filename
        for filename in [
            """vision.cpp""",
            os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
            os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
        ]
    ]
    load(
        """MultiScaleDeformableAttention""" , src_files , with_cuda=True , extra_include_paths=[str(root )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
            """-DCUDA_HAS_FP16=1""",
            """-D__CUDA_NO_HALF_OPERATORS__""",
            """-D__CUDA_NO_HALF_CONVERSIONS__""",
            """-D__CUDA_NO_HALF2_OPERATORS__""",
        ] , )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 50 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path , base_model ) -> None:
    '''simple docstring'''
    config = FunnelConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 72 |
'''simple docstring'''
def print_max_activities(start: list[int] , finish: list[int] ):
    n = len(finish )
    print("""The following activities are selected:""" )
    # The first activity is always selected
    i = 0
    print(i , end=""",""" )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=""",""" )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 50 | 0 |
def interpolation_search(sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
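# Worked example: searching 67 in [10, 30, 40, 45, 50, 66, 77, 93] first probes
# point = 0 + (67 - 10) * 7 // (93 - 10) = 4, finds 50 < 67, and continues with left = 5.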
def interpolation_search_by_recursion(sorted_collection , item , left , right ):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , point )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
def __assert_sorted(collection ):
    if collection != sorted(collection ):
        raise ValueError('Collection must be ascending sorted' )
    return True
if __name__ == "__main__":
import sys
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')
    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print('Not found')
| 73 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer ):
    '''simple docstring'''
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
        super().__init__(args=args , **kwargs )
| 50 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state ):
    """simple docstring"""
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
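# Example: with 2 processes, rank 0 holds tensor([1., 2.]) and rank 1 holds tensor([3., 4.]).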
def test_gather(state ):
    """simple docstring"""
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object(state ):
    """simple docstring"""
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, F'''{gathered_obj}, {len(gathered_obj )} != {state.num_processes}'''
    assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def test_broadcast(state ):
    """simple docstring"""
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes(state ):
    """simple docstring"""
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum(state ):
    """simple docstring"""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , '''sum''' )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F'''{reduced_tensor} != {truth_tensor}'''
def test_reduce_mean(state ):
    """simple docstring"""
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , '''mean''' )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F'''{reduced_tensor} != {truth_tensor}'''
def _mp_fn(index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
def main():
    """simple docstring"""
    state = PartialState()
    state.print(F'''State: {state}''' )
    state.print('''testing gather''' )
    test_gather(state )
    state.print('''testing gather_object''' )
    test_gather_object(state )
    state.print('''testing broadcast''' )
    test_broadcast(state )
    state.print('''testing pad_across_processes''' )
    test_pad_across_processes(state )
    state.print('''testing reduce_sum''' )
    test_reduce_sum(state )
    state.print('''testing reduce_mean''' )
    test_reduce_mean(state )
if __name__ == "__main__":
main()
| 74 |
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str] ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
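# Example: _hash_python_lines(["x = 1  # set x", "", "y = 2"]) hashes the string "x = 1  \ny = 2".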
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 50 | 0 |
'''simple docstring'''
import operator as op
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
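# Added illustration: STR_OPERATION_TO_FUNC turns a comparison spelled as a string
# (e.g. in a version requirement such as ">=1.10.2") into the matching callable.
assert STR_OPERATION_TO_FUNC[">="](2, 1)
assert not STR_OPERATION_TO_FUNC["<"](2, 2)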
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
UpperCamelCase__ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 75 |
'''simple docstring'''
from __future__ import annotations

import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
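# Added walk-through of the first strand-extraction pass on [4, 3, 5, 1, 2]:
# arr.pop(0) seeds sublist = [4]; the scan then pulls 5 into the ascending strand,
# leaving sublist = [4, 5] and arr = [3, 1, 2] for the recursive call, which merges
# each strand into `solution` in order.
assert strand_sort([]) == []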
| 50 | 0 |
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]
    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[smallest], array[idx] = array[idx], array[smallest]
                (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                ) = (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                )
                idx = smallest
            else:
                break
    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
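# Added usage sketch for the remaining public operations (module-level, like the
# demo above):
print(f"peek    : {my_min_heap.peek()}")      # Node(B, -17) is now the minimum
print(f"removed : {my_min_heap.remove()}")    # pops it and re-heapifies
print(f"is_empty: {my_min_heap.is_empty()}")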
if __name__ == "__main__":
import doctest
doctest.testmod()
| 76 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset dict into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
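# Added usage sketch (values are made up), kept commented out so importing the
# module does not train a model:
# _toy_x = np.array([[0.0], [1.0], [2.0], [3.0]])
# _toy_y = np.array([0.0, 1.0, 2.0, 3.0])
# xgboost(_toy_x[:3], _toy_y[:3], _toy_x[3:]).shape == (1, 1)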
def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 50 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
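# Added usage sketch of the derived attributes computed above:
# config = DinatConfig()
# config.hidden_size == 64 * 2 ** 3  # embed_dim doubled at each of the 3 downsamples
# config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]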
| 77 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    '''simple docstring'''

    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
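# Added sketch (not part of the test suite): exercising a single warper by hand.
# scores = jnp.log(jnp.array([[0.5, 0.3, 0.2]]))
# top_k = FlaxTopKLogitsWarper(top_k=2)
# top_k(None, scores, cur_len=None) keeps the two largest logits, sets the rest to -inf.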
| 50 | 0 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    '''simple docstring'''
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    '''simple docstring'''
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    '''simple docstring'''
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    '''simple docstring'''
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
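# Added worked example for the optimized routine, row_index = 4:
# temp_row = [0, 1, 3, 3, 1, 0] (previous row [1, 3, 3, 1] padded with zeros),
# distinct_elements = sum(divmod(5, 2)) = 3, row_first_half = [1, 4, 6],
# row_second_half = [4, 1] (mirror), giving the row [1, 4, 6, 4, 1].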
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 78 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
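# Added note on the pattern: _LazyModule defers the heavy torch/TF imports until an
# attribute is first accessed, e.g.:
# from transformers.models import groupvit
# model_cls = groupvit.GroupViTModel  # modeling code is imported only at this line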
| 50 | 0 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
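# Added usage sketch: summing a three-node tree.
# tree = Node(10); tree.left = Node(5); tree.right = Node(-3)
# next(iter(BinaryTreeNodeSum(tree))) == 12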
if __name__ == "__main__":
import doctest
doctest.testmod()
| 79 |
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
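# Added note: the sign bit of num1 ^ num2 is set exactly when the operands' sign
# bits differ, so the XOR is negative iff the signs differ.
# different_signs(1, -1) -> True; different_signs(1, 1) -> False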
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"
    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
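# Added usage sketch:
# onnx_config = BigBirdOnnxConfig(BigBirdConfig())
# dict(onnx_config.inputs)  # {"input_ids": {0: "batch", 1: "sequence"}, ...}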
| 80 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |