import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
    import faiss


@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """
    Compute the digits of pi with the Chudnovsky algorithm, to the requested
    number of significant digits.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if the saved state disagrees with
        # the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
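# Minimal usage sketch (checkpoint name and sentence are illustrative, not
# part of the original module):
#
#     tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     encoding = tokenizer("Hello world")  # -> input_ids, attention_mask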
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
    from .utils import rich
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """
    Miller-Rabin probabilistic primality test, using `prec` random witnesses.
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
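# For reference, a minimal sketch of the imported helper (assumed behavior,
# not part of this file): bin_exp_mod(a, n, m) returns a**n % m via
# square-and-multiply.
#
#     def bin_exp_mod(a, n, m):
#         result, base = 1, a % m
#         while n > 0:
#             if n % 2 == 1:
#                 result = result * base % m
#             base = base * base % m
#             n //= 2
#         return result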
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init file and parse, per backend, the objects defined in the
    `_import_structure` half and in the `TYPE_CHECKING` half.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """
    Check all inits in the repo define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """
    Check all submodules are registered in the main init of Transformers.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool

if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
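# Minimal usage sketch (assumed agent-tool call API, not part of the
# original module):
#
#     tool = TextToSpeechTool()
#     audio = tool("Hello, world")  # 1-D waveform tensor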
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_weight: int
        min_edge: EdgeT
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """
    Find the maximum saving that can be made by removing redundant edges
    while keeping the network connected (Project Euler problem 107).
    """
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions


def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")


class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
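# Hypothetical equivalence check (not part of the original module):
# custom_unfold above should match torch.Tensor.unfold on eager tensors, e.g.
#
#     x = torch.arange(10).view(1, 10)
#     assert torch.equal(custom_unfold(x, 1, 4, 2), x.unfold(1, 4, 2))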
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """
    Count the perimeters L <= limit for which exactly one integer-sided right
    triangle exists (Project Euler problem 75), by generating primitive
    Pythagorean triples with Euclid's formula.
    """
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """
    Return True if the given string is a valid dotted-quad IPv4 address.
    """
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
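# Minimal usage sketch (checkpoint name and inputs are illustrative, not part
# of the original module):
#
#     from transformers import LayoutXLMProcessor
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor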
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    # The source is truncated here, mid-method; the remainder of the test
    # class is not available.
    def test_tokenizer(self):
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 340 | 1 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
a_ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
a_ = typing.Union[np.floataa, int, float] # noqa: UP007
def _a ( UpperCamelCase_ : Vector , UpperCamelCase_ : Vector ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(UpperCamelCase_ ) - np.asarray(UpperCamelCase_ )) ** 2 ) )
def _a ( UpperCamelCase_ : Vector , UpperCamelCase_ : Vector ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(UpperCamelCase_ , UpperCamelCase_ ) ) ** (1 / 2)
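# Both helpers are meant to compute the Euclidean distance between two
# equal-length vectors; for [0, 0] and [3, 4] the expected result is 5.0.
# The benchmark below contrasts only their speed, not their output.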
if __name__ == "__main__":
def _a ( ) -> None:
"""simple docstring"""
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10_000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10_000 , globals=globals() , ) )
benchmark()
| 340 |
from __future__ import annotations
from cmath import sqrt
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowerCAmelCase__ = b * b - 4 * a * c
lowerCAmelCase__ = (-b + sqrt(UpperCamelCase_ )) / (2 * a)
lowerCAmelCase__ = (-b - sqrt(UpperCamelCase_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main()
| 340 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""vit"""
def __init__( self , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=224 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=16 , **__UpperCAmelCase , )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = qkv_bias
lowerCAmelCase__ = encoder_stride
class lowercase__ ( _UpperCAmelCase ):
a_ =version.parse("""1.11""" )
@property
def UpperCAmelCase ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase ( self )-> float:
'''simple docstring'''
return 1E-4
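# Minimal usage sketch (the names ViTConfig and ViTOnnxConfig are the upstream
# identifiers for the two classes above and are assumptions here):
#
#   config = ViTConfig(image_size=224, patch_size=16, num_channels=3)
#   onnx_config = ViTOnnxConfig(config)
#   onnx_config.inputs               # OrderedDict with the pixel_values axis mapping
#   onnx_config.atol_for_validation  # 1e-4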
| 340 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _a ( UpperCamelCase_ : int = 3 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
    if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
        raise TypeError("number of qubits must be an integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(UpperCamelCase_ ) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10)." )
lowerCAmelCase__ = QuantumRegister(UpperCamelCase_ , "qr" )
lowerCAmelCase__ = ClassicalRegister(UpperCamelCase_ , "cr" )
lowerCAmelCase__ = QuantumCircuit(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = number_of_qubits
for i in range(UpperCamelCase_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(UpperCamelCase_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , UpperCamelCase_ , UpperCamelCase_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(UpperCamelCase_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(UpperCamelCase_ , UpperCamelCase_ )
# simulate with 10000 shots
lowerCAmelCase__ = Aer.get_backend("qasm_simulator" )
lowerCAmelCase__ = execute(UpperCamelCase_ , UpperCamelCase_ , shots=10_000 )
return job.result().get_counts(UpperCamelCase_ )
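# For the default all-|0> input state, the QFT yields a uniform superposition,
# so the 10_000 shots should split roughly evenly (~1250 each) across all
# 2**number_of_qubits measurement outcomes.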
if __name__ == "__main__":
    print(
        F"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
| 340 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
a_ = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
a_ = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def _a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
lowerCAmelCase__ = json.loads(f.read() )
lowerCAmelCase__ = collections.OrderedDict()
lowerCAmelCase__ = collections.OrderedDict()
lowerCAmelCase__ = collections.OrderedDict()
with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(UpperCamelCase_ ):
lowerCAmelCase__ = b
lowerCAmelCase__ = idx
for wd in b:
lowerCAmelCase__ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class lowercase__ ( _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ =["""input_ids""", """attention_mask"""]
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|startoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , )-> Any:
'''simple docstring'''
super().__init__(
unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , do_clean_text=__UpperCAmelCase , **__UpperCAmelCase , )
        if not os.path.isfile(__UpperCAmelCase ):
            raise ValueError(
                F"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(__UpperCAmelCase ):
            raise ValueError(
                F"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
lowerCAmelCase__ = do_clean_text
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = load_vocab_and_emoji(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return len(self.raw_vocab )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.subword_tokenizer.tokenize(__UpperCAmelCase , clean=self.do_clean_text )
def UpperCAmelCase ( self , __UpperCAmelCase )-> List[Any]:
'''simple docstring'''
return self.vocab.get(__UpperCAmelCase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = "".join(__UpperCAmelCase ).strip()
return out_string
def UpperCAmelCase ( self , __UpperCAmelCase )-> List[int]:
'''simple docstring'''
lowerCAmelCase__ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] )
if len(__UpperCAmelCase ) > self.model_max_length:
lowerCAmelCase__ = input_ids[-self.model_max_length :]
return input_ids
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]:
'''simple docstring'''
lowerCAmelCase__ = 0
if os.path.isdir(__UpperCAmelCase ):
lowerCAmelCase__ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
lowerCAmelCase__ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
lowerCAmelCase__ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
lowerCAmelCase__ = token_index
writer.write(",".join(__UpperCAmelCase ) + "\n" )
index += 1
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , __UpperCAmelCase )
return vocab_file, emoji_file
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = vocab # same as swe
lowerCAmelCase__ = ids_to_tokens # same as bpe
lowerCAmelCase__ = emoji
lowerCAmelCase__ = np.max([len(__UpperCAmelCase ) for w in self.vocab.keys()] )
lowerCAmelCase__ = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
lowerCAmelCase__ = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
lowerCAmelCase__ = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
lowerCAmelCase__ = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
lowerCAmelCase__ = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
lowerCAmelCase__ = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
lowerCAmelCase__ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
lowerCAmelCase__ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
lowerCAmelCase__ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self )-> Optional[Any]:
'''simple docstring'''
return len(self.ids_to_tokens )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = self.content_repattera.sub("<URL>" , __UpperCAmelCase )
lowerCAmelCase__ = self.content_repattera.sub("<EMAIL>" , __UpperCAmelCase )
lowerCAmelCase__ = self.content_repattera.sub("<TEL>" , __UpperCAmelCase )
lowerCAmelCase__ = self.content_repattera.sub("<DATE>" , __UpperCAmelCase )
lowerCAmelCase__ = self.content_repattera.sub("<DATE>" , __UpperCAmelCase )
lowerCAmelCase__ = self.content_repattera.sub("<PRICE>" , __UpperCAmelCase )
lowerCAmelCase__ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCAmelCase__ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = text.replace(" " , "<SP>" )
lowerCAmelCase__ = text.replace(" " , "<SP>" )
lowerCAmelCase__ = text.replace("\r\n" , "<BR>" )
lowerCAmelCase__ = text.replace("\n" , "<BR>" )
lowerCAmelCase__ = text.replace("\r" , "<BR>" )
lowerCAmelCase__ = text.replace("\t" , "<TAB>" )
lowerCAmelCase__ = text.replace("—" , "ー" )
lowerCAmelCase__ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
lowerCAmelCase__ = text.replace(__UpperCAmelCase , __UpperCAmelCase )
if clean:
lowerCAmelCase__ = self.clean_text(__UpperCAmelCase )
def check_simbol(__UpperCAmelCase ):
lowerCAmelCase__ = x.encode()
if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 2:
lowerCAmelCase__ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0XC2_A1 and c <= 0XC2_BF)
or (c >= 0XC7_80 and c <= 0XC7_83)
or (c >= 0XCA_B9 and c <= 0XCB_BF)
or (c >= 0XCC_80 and c <= 0XCD_A2)
):
return True
return False
def checkuae(__UpperCAmelCase ):
lowerCAmelCase__ = x.encode()
if len(__UpperCAmelCase ) == 1 and len(__UpperCAmelCase ) == 3:
lowerCAmelCase__ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
return True
return False
lowerCAmelCase__ = 0
lowerCAmelCase__ = []
while pos < len(__UpperCAmelCase ):
lowerCAmelCase__ = min(len(__UpperCAmelCase ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
lowerCAmelCase__ = [] # (token_id, token, pos)
for e in range(__UpperCAmelCase , __UpperCAmelCase , -1 ):
lowerCAmelCase__ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__UpperCAmelCase ) > 2:
lowerCAmelCase__ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__UpperCAmelCase ) > 0:
# the smallest token_id is adopted
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[0] )[0]
result.append(__UpperCAmelCase )
lowerCAmelCase__ = e
else:
lowerCAmelCase__ = pos + 1
lowerCAmelCase__ = text[pos:end]
if check_simbol(__UpperCAmelCase ):
result.append("<KIGOU>" )
elif checkuae(__UpperCAmelCase ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
lowerCAmelCase__ = end
return result
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase="\n" )-> Any:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__UpperCAmelCase ) > 0:
words.append(bytearray(__UpperCAmelCase ).decode("utf-8" , errors="replace" ) )
lowerCAmelCase__ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(__UpperCAmelCase )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
words.append(bytearray(__UpperCAmelCase ).decode("utf-8" , errors="replace" ) )
lowerCAmelCase__ = "".join(__UpperCAmelCase )
return text
| 340 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
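# Minimal usage sketch (the checkpoint name and the surrounding model call are
# assumptions, not part of this module):
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   decoded = processor.batch_decode(outputs.logits)["generated_text"]
#
# Per sample, batch_decode keeps whichever of the char/bpe/wp decoding heads
# reaches the highest cumulative confidence score.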
| 340 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
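# With the _LazyModule shim above, importing this package only records the
# import structure; the heavy torch/tensorflow modules are loaded on first
# attribute access, while the TYPE_CHECKING branch exposes the real symbols to
# static type checkers.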
| 340 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
a_ = get_logger(__name__)
class lowercase__ :
a_ ="""dummy_data"""
a_ ="""datasets"""
a_ =False
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , )-> str:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = dataset_name
lowerCAmelCase__ = cache_dir
lowerCAmelCase__ = use_local_dummy_data
lowerCAmelCase__ = config
# download_callbacks take a single url as input
lowerCAmelCase__ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCAmelCase__ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCAmelCase__ = str(__UpperCAmelCase )
# to be downloaded
lowerCAmelCase__ = None
lowerCAmelCase__ = None
@property
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
if self._dummy_file is None:
lowerCAmelCase__ = self.download_dummy_data()
return self._dummy_file
@property
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCAmelCase__ = cached_path(
__UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase )
return os.path.join(__UpperCAmelCase , self.dummy_file_name )
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
if self._bucket_url is None:
lowerCAmelCase__ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCAmelCase__ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCAmelCase__ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase )
elif isinstance(__UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase )
else:
return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase )-> List[str]:
'''simple docstring'''
return self.download_and_extract(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Tuple:
'''simple docstring'''
return self.download_and_extract(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
return path
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
return {}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> int:
'''simple docstring'''
lowerCAmelCase__ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
for single_url in single_urls:
download_callback(__UpperCAmelCase )
else:
lowerCAmelCase__ = single_urls
download_callback(__UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls]
else:
lowerCAmelCase__ = single_urls
lowerCAmelCase__ = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) )
lowerCAmelCase__ = value
# make sure that values are unique
if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowerCAmelCase__ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCAmelCase__ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __UpperCAmelCase ) ) for url in data_url )
lowerCAmelCase__ = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowerCAmelCase__ = [data_url[0]] * len(__UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase__ = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__UpperCAmelCase )
return dummy_data_list
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(__UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCAmelCase__ = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
            # while now we expect the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
pass
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase ( self , __UpperCAmelCase )-> Any:
'''simple docstring'''
def _iter_archive_members(__UpperCAmelCase ):
# this preserves the order of the members inside the ZIP archive
lowerCAmelCase__ = Path(self.dummy_file ).parent
lowerCAmelCase__ = path.relative_to(__UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowerCAmelCase__ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__UpperCAmelCase )
lowerCAmelCase__ = Path(__UpperCAmelCase )
lowerCAmelCase__ = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("rb" )
def UpperCAmelCase ( self , __UpperCAmelCase )-> int:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [paths]
for path in paths:
if os.path.isfile(__UpperCAmelCase ):
if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ):
if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(__UpperCAmelCase ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
| 340 |
from collections import defaultdict
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
for v in tree[start]:
if v not in visited:
ret += dfs(UpperCamelCase_ )
if ret % 2 == 0:
cuts.append(UpperCamelCase_ )
return ret
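# Every node whose subtree holds an even number of vertices marks one removable
# edge (the edge to its parent). The root's subtree is the whole tree, so it is
# collected too and discounted via `len(cuts) - 1`; the sample graph below
# yields 2.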
def _a ( ) -> Optional[Any]:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
a_, a_ = 10, 9
a_ = defaultdict(list)
a_ = {}
a_ = []
a_ = 0
a_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 340 | 1 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
a_ = 8
def _a ( UpperCamelCase_ : str , UpperCamelCase_ : List[str]=BITS ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = x.device
lowerCAmelCase__ = (x * 255).int().clamp(0 , 255 )
lowerCAmelCase__ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase_ )
lowerCAmelCase__ = rearrange(UpperCamelCase_ , "d -> d 1 1" )
lowerCAmelCase__ = rearrange(UpperCamelCase_ , "b c h w -> b c 1 h w" )
lowerCAmelCase__ = ((x & mask) != 0).float()
lowerCAmelCase__ = rearrange(UpperCamelCase_ , "b c d h w -> b (c d) h w" )
lowerCAmelCase__ = bits * 2 - 1
return bits
def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : Dict=BITS ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = x.device
lowerCAmelCase__ = (x > 0).int()
lowerCAmelCase__ = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase_ , dtype=torch.intaa )
lowerCAmelCase__ = rearrange(UpperCamelCase_ , "d -> d 1 1" )
lowerCAmelCase__ = rearrange(UpperCamelCase_ , "b (c d) h w -> b c d h w" , d=8 )
lowerCAmelCase__ = reduce(x * mask , "b c d h w -> b c h w" , "sum" )
return (dec / 255).clamp(0.0 , 1.0 )
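# Round-trip sketch (shapes are illustrative): decimal_to_bits maps intensities
# in [0, 1] to BITS channels valued in {-1, 1}, and bits_to_decimal inverts it
# up to 8-bit quantization, i.e. for x = torch.rand(1, 3, 8, 8) the round trip
# returns (x * 255).int().float() / 255.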
def _a ( self : List[str] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to follow the notation below
    # Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
lowerCAmelCase__ = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
lowerCAmelCase__ = self.alphas_cumprod[timestep]
lowerCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
lowerCAmelCase__ = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
lowerCAmelCase__ = self.bit_scale
if self.config.clip_sample:
lowerCAmelCase__ = torch.clamp(UpperCamelCase_ , -scale , UpperCamelCase_ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
lowerCAmelCase__ = self._get_variance(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
lowerCAmelCase__ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
lowerCAmelCase__ = model_output.device if torch.is_tensor(UpperCamelCase_ ) else "cpu"
lowerCAmelCase__ = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ ).to(UpperCamelCase_ )
lowerCAmelCase__ = self._get_variance(UpperCamelCase_ , UpperCamelCase_ ) ** 0.5 * eta * noise
lowerCAmelCase__ = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ )
def _a ( self : Tuple , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Any="epsilon" , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
lowerCAmelCase__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowerCAmelCase__ , lowerCAmelCase__ = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 )
else:
lowerCAmelCase__ = None
# 1. compute alphas, betas
lowerCAmelCase__ = self.alphas_cumprod[t]
lowerCAmelCase__ = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowerCAmelCase__ = 1 - alpha_prod_t
lowerCAmelCase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowerCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowerCAmelCase__ = model_output
else:
raise ValueError(F"Unsupported prediction_type {prediction_type}." )
# 3. Clip "predicted x_0"
lowerCAmelCase__ = self.bit_scale
if self.config.clip_sample:
lowerCAmelCase__ = torch.clamp(UpperCamelCase_ , -scale , UpperCamelCase_ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowerCAmelCase__ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase__ = 0
if t > 0:
lowerCAmelCase__ = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase_ ).to(model_output.device )
lowerCAmelCase__ = (self._get_variance(UpperCamelCase_ , predicted_variance=UpperCamelCase_ ) ** 0.5) * noise
lowerCAmelCase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ )
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1.0 , )-> List[str]:
'''simple docstring'''
super().__init__()
lowerCAmelCase__ = bit_scale
lowerCAmelCase__ = (
ddim_bit_scheduler_step if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 256 , __UpperCAmelCase = 256 , __UpperCAmelCase = 50 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , )-> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
lowerCAmelCase__ = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=__UpperCAmelCase , )
lowerCAmelCase__ = decimal_to_bits(__UpperCAmelCase ) * self.bit_scale
lowerCAmelCase__ = latents.to(self.device )
self.scheduler.set_timesteps(__UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowerCAmelCase__ = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
lowerCAmelCase__ = bits_to_decimal(__UpperCAmelCase )
if output_type == "pil":
lowerCAmelCase__ = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
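# Minimal usage sketch (the UNet construction is elided and the pipeline name
# follows the upstream community example; both are assumptions here):
#
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler())
#   image = pipe(height=64, width=64, num_inference_steps=25).images[0]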
| 340 |
import requests
from bsa import BeautifulSoup
def _a ( UpperCamelCase_ : str = "AAPL" ) -> str:
"""simple docstring"""
lowerCAmelCase__ = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
lowerCAmelCase__ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
lowerCAmelCase__ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
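# Note: the lookup above targets a specific CSS class in Yahoo Finance's
# markup, which changes over time; treat the selector as illustrative rather
# than a stable API.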
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 1 |
from collections.abc import Sequence
from queue import Queue
class lowercase__ :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None )-> str:
'''simple docstring'''
lowerCAmelCase__ = start
lowerCAmelCase__ = end
lowerCAmelCase__ = val
lowerCAmelCase__ = (start + end) // 2
lowerCAmelCase__ = left
lowerCAmelCase__ = right
def __repr__( self )-> Tuple:
'''simple docstring'''
return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class lowercase__ :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = collection
lowerCAmelCase__ = function
if self.collection:
lowerCAmelCase__ = self._build_tree(0 , len(__UpperCAmelCase ) - 1 )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Tuple:
'''simple docstring'''
self._update_tree(self.root , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
return self._query_range(self.root , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> List[str]:
'''simple docstring'''
if start == end:
return SegmentTreeNode(__UpperCAmelCase , __UpperCAmelCase , self.collection[start] )
lowerCAmelCase__ = (start + end) // 2
lowerCAmelCase__ = self._build_tree(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = self._build_tree(mid + 1 , __UpperCAmelCase )
return SegmentTreeNode(__UpperCAmelCase , __UpperCAmelCase , self.fn(left.val , right.val ) , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Dict:
'''simple docstring'''
if node.start == i and node.end == i:
lowerCAmelCase__ = val
return
if i <= node.mid:
self._update_tree(node.left , __UpperCAmelCase , __UpperCAmelCase )
else:
self._update_tree(node.right , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = self.fn(node.left.val , node.right.val )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , __UpperCAmelCase , __UpperCAmelCase )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , __UpperCAmelCase , node.mid ) , self._query_range(node.right , node.mid + 1 , __UpperCAmelCase ) , )
else:
# range in right child tree
return self._query_range(node.right , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
if self.root is not None:
lowerCAmelCase__ = Queue()
queue.put(self.root )
while not queue.empty():
lowerCAmelCase__ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
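# Complexity notes: building the tree is O(n); `update` and `query_range` each
# descend at most one split path per side, i.e. O(log n). Both `query_range`
# bounds are inclusive, as the demo below shows (query_range(1, 3) covers
# indices 1..3).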
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
a_ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 340 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
a_ = (3, 9, -11, 0, 7, 5, 1, -1)
a_ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class lowercase__ :
a_ =42
a_ =42
class lowercase__ :
def __init__( self , __UpperCAmelCase )-> None:
'''simple docstring'''
lowerCAmelCase__ = None
for i in sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ):
lowerCAmelCase__ = Node(__UpperCAmelCase , self.head )
def __iter__( self )-> Iterator[int]:
'''simple docstring'''
lowerCAmelCase__ = self.head
while node:
yield node.data
lowerCAmelCase__ = node.next_node
def __len__( self )-> int:
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self )-> str:
'''simple docstring'''
return " -> ".join([str(__UpperCAmelCase ) for node in self] )
def _a ( UpperCamelCase_ : SortedLinkedList , UpperCamelCase_ : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(UpperCamelCase_ ) + list(UpperCamelCase_ ) )
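# The merge leans on SortedLinkedList's constructor, which sorts whatever
# iterable it receives, so concatenating both lists and re-wrapping yields a
# sorted result in O((m + n) log(m + n)) rather than via a linear-time merge.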
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 340 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class lowercase__ :
def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase=None )-> List[str]:
'''simple docstring'''
if not conversation_id:
lowerCAmelCase__ = uuid.uuida()
if past_user_inputs is None:
lowerCAmelCase__ = []
if generated_responses is None:
lowerCAmelCase__ = []
lowerCAmelCase__ = conversation_id
lowerCAmelCase__ = past_user_inputs
lowerCAmelCase__ = generated_responses
lowerCAmelCase__ = text
def __eq__( self , __UpperCAmelCase )-> Dict:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False )-> str:
'''simple docstring'''
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"User input added while unprocessed input still exists: \"{self.new_user_input}\" was overwritten "
                    F"with: \"{text}\"." )
                lowerCAmelCase__ = text
            else:
                logger.warning(
                    F"User input added while unprocessed input still exists: \"{self.new_user_input}\"; new input "
                    F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input." )
        else:
            lowerCAmelCase__ = text
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
lowerCAmelCase__ = None
def UpperCAmelCase ( self , __UpperCAmelCase )-> Dict:
'''simple docstring'''
self.generated_responses.append(__UpperCAmelCase )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = F"Conversation id: {self.uuid} \n"
for is_user, text in self.iter_texts():
lowerCAmelCase__ = "user" if is_user else "bot"
output += F"{name} >> {text} \n"
return output
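# A short usage sketch of the conversation container above. In the original
# transformers source this class is exported as `Conversation` and its methods
# keep descriptive names (`add_user_input`, `mark_processed`, `append_response`);
# those names are assumptions here, since this corpus rewrites them:
#
#     conversation = Conversation("Which movie should I watch tonight?")
#     conversation.mark_processed()                   # move the pending input into history
#     conversation.append_response("How about Wall-E?")
#     conversation.add_user_input("Is it any good?")  # queue the next user turn
#     print(conversation)                             # "Conversation id: ... user >> ... bot >> ..."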
@add_end_docstrings(
_UpperCAmelCase, r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""", )
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
lowerCAmelCase__ = self.tokenizer.eos_token
def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
if min_length_for_response is not None:
lowerCAmelCase__ = min_length_for_response
if minimum_tokens is not None:
lowerCAmelCase__ = minimum_tokens
if "max_length" in generate_kwargs:
lowerCAmelCase__ = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
lowerCAmelCase__ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase )-> Any:
'''simple docstring'''
lowerCAmelCase__ = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 )-> Dict[str, Any]:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            raise ValueError("ConversationalPipeline expects a Conversation object as input" )
if conversation.new_user_input is None:
            raise ValueError(
                F"Conversation with UUID {conversation.uuid} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method" )
if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
lowerCAmelCase__ = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
lowerCAmelCase__ = self._legacy_parse_and_tokenize(__UpperCAmelCase )
if self.framework == "pt":
lowerCAmelCase__ = torch.LongTensor([input_ids] )
elif self.framework == "tf":
lowerCAmelCase__ = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length )
lowerCAmelCase__ = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
lowerCAmelCase__ = max_length - minimum_tokens
lowerCAmelCase__ = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
lowerCAmelCase__ = model_inputs["attention_mask"][:, -trim:]
lowerCAmelCase__ = model_inputs.pop("conversation" )
lowerCAmelCase__ = max_length
lowerCAmelCase__ = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase )
if self.model.config.is_encoder_decoder:
lowerCAmelCase__ = 1
else:
lowerCAmelCase__ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = model_outputs["output_ids"]
lowerCAmelCase__ = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , )
lowerCAmelCase__ = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(__UpperCAmelCase )
return conversation
def UpperCAmelCase ( self , __UpperCAmelCase )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.tokenizer.eos_token_id
lowerCAmelCase__ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > self.tokenizer.model_max_length:
lowerCAmelCase__ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
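# A minimal end-to-end sketch, assuming this class is registered under the
# "conversational" pipeline task as in the original transformers source; guarded
# behind __main__ because it downloads a checkpoint.
if __name__ == "__main__":
    from transformers import Conversation, pipeline

    chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
    conversation = chatbot(Conversation("Hi, can you recommend a movie?"))
    print(conversation.generated_responses[-1])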
| 340 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a_ = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
a_ = '''hopper-medium-v2'''
a_ = gym.make(env_name)
a_ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
a_ = env.reset()
a_ = 0
a_ = 0
a_ = 1000
a_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a_ = pipeline(obs, planning_horizon=32)
# execute action in environment
a_, a_, a_, a_ = env.step(denorm_actions)
a_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
a_ = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 1 |
import requests
from bsa import BeautifulSoup
def _a ( UpperCamelCase_ : str = "AAPL" ) -> str:
"""simple docstring"""
lowerCAmelCase__ = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
lowerCAmelCase__ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
lowerCAmelCase__ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a_ = '''src/transformers'''
a_ = '''docs/source/en/tasks'''
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
# Find the start prompt.
lowerCAmelCase__ = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase__ = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
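# Sketch of the contract above: for a file containing a line starting with a
# start prompt, some generated lines, then a line starting with an end prompt,
# the helper returns the text strictly between the two prompt lines plus the
# indices needed to splice a replacement back into `lines` (the actual prompts
# used in this script are the HTML comments passed in further below).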
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(TRANSFORMERS_PATH)
a_ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a_ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def _a ( UpperCamelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = TASK_GUIDE_TO_MODELS[task_guide]
lowerCAmelCase__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase_ , set() )
lowerCAmelCase__ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=False ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _find_text_in_file(
filename=os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
lowerCAmelCase__ = get_model_list_for_task(UpperCamelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
a_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 1 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
a_ = '''base_with_context'''
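# Note on the `.T` calls throughout this file: Flax/T5X dense kernels are stored
# as (in_features, out_features), while torch.nn.Linear weights are
# (out_features, in_features), so every kernel is transposed on its way into the
# PyTorch parameters.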
def _a ( UpperCamelCase_ : str , UpperCamelCase_ : Any ) -> int:
"""simple docstring"""
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase_ )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCAmelCase__ = weights[F"layers_{lyr_num}"]
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCAmelCase__ = ly_weight["attention"]
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def _a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str ) -> str:
"""simple docstring"""
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase_ )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCAmelCase__ = weights[F"layers_{lyr_num}"]
lowerCAmelCase__ = ly_weight["attention"]
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase_ )
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCAmelCase__ = weights[F"layers_{lyr_num}"]
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
lowerCAmelCase__ = ly_weight["self_attention"]
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCAmelCase__ = ly_weight["MultiHeadDotProductAttention_0"]
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowerCAmelCase__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def _a ( UpperCamelCase_ : Tuple ) -> str:
"""simple docstring"""
lowerCAmelCase__ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
lowerCAmelCase__ = jnp.tree_util.tree_map(onp.array , UpperCamelCase_ )
lowerCAmelCase__ = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
lowerCAmelCase__ = os.path.join(args.checkpoint_path , ".." , "config.gin" )
lowerCAmelCase__ = inference.parse_training_gin_file(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = inference.InferenceModel(args.checkpoint_path , UpperCamelCase_ )
lowerCAmelCase__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
lowerCAmelCase__ = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
lowerCAmelCase__ = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
lowerCAmelCase__ = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowerCAmelCase__ = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , UpperCamelCase_ )
lowerCAmelCase__ = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , UpperCamelCase_ )
lowerCAmelCase__ = load_decoder(ta_checkpoint["target"]["decoder"] , UpperCamelCase_ )
lowerCAmelCase__ = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
lowerCAmelCase__ = SpectrogramDiffusionPipeline(
notes_encoder=UpperCamelCase_ , continuous_encoder=UpperCamelCase_ , decoder=UpperCamelCase_ , scheduler=UpperCamelCase_ , melgan=UpperCamelCase_ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"{MODEL}/checkpoint_500000",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
a_ = parser.parse_args()
main(args)
| 340 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
lowerCAmelCase__ = TOKENIZER_CLASSES
else:
lowerCAmelCase__ = {tokenizer_name: getattr(UpperCamelCase_ , tokenizer_name + "Fast" )}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
lowerCAmelCase__ = TOKENIZER_CLASSES[tokenizer_name]
lowerCAmelCase__ = True
if checkpoint_name is None:
lowerCAmelCase__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCAmelCase__ = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
lowerCAmelCase__ = tokenizer_class.from_pretrained(UpperCamelCase_ , force_download=UpperCamelCase_ )
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCAmelCase__ , lowerCAmelCase__ = checkpoint.split("/" )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
elif add_prefix:
lowerCAmelCase__ = checkpoint
lowerCAmelCase__ = dump_path
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCAmelCase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCAmelCase__ = file_path.split(UpperCamelCase_ )[-1][0]
if next_char == "/":
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
lowerCAmelCase__ = tokenizer.save_pretrained(
UpperCamelCase_ , legacy_format=UpperCamelCase_ , filename_prefix=UpperCamelCase_ )
logger.info(F"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(UpperCamelCase_ )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 340 | 1 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase__ ( _UpperCAmelCase, unittest.TestCase ):
a_ =FunnelTokenizer
a_ =FunnelTokenizerFast
a_ =True
a_ =True
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = "UNwant\u00E9d,running"
lowerCAmelCase__ = "unwanted, running"
return input_text, output_text
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizers(do_lower_case=__UpperCAmelCase )
for tokenizer in tokenizers:
lowerCAmelCase__ = tokenizer("UNwant\u00E9d,running" )
lowerCAmelCase__ = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
lowerCAmelCase__ = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 340 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _a ( UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : List[str]=1_024 , UpperCamelCase_ : Dict=1_024 , UpperCamelCase_ : List[str]=False , **UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = SeqaSeqDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , type_path="train" , **UpperCamelCase_ )
lowerCAmelCase__ = tok.pad_token_id
def get_lens(UpperCamelCase_ : str ):
lowerCAmelCase__ = tqdm(
DataLoader(UpperCamelCase_ , batch_size=512 , num_workers=8 , shuffle=UpperCamelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
lowerCAmelCase__ = []
for batch in dl:
lowerCAmelCase__ = batch["input_ids"].ne(UpperCamelCase_ ).sum(1 ).tolist()
lowerCAmelCase__ = batch["labels"].ne(UpperCamelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(UpperCamelCase_ , UpperCamelCase_ ):
max_lens.append(max(UpperCamelCase_ , UpperCamelCase_ ) )
else:
max_lens.extend(UpperCamelCase_ )
return max_lens
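    # The per-example token lengths computed above are pickled below so that
    # downstream length-based batching (e.g. a sortish sampler) can reuse them
    # without re-tokenizing; that consumer is an assumption from the surrounding
    # seq2seq examples.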
lowerCAmelCase__ = get_lens(UpperCamelCase_ )
lowerCAmelCase__ = SeqaSeqDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , type_path="val" , **UpperCamelCase_ )
lowerCAmelCase__ = get_lens(UpperCamelCase_ )
pickle_save(UpperCamelCase_ , train_ds.len_file )
pickle_save(UpperCamelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 340 | 1 |
def _a ( UpperCamelCase_ : list[list[int | float]] ) -> int:
"""simple docstring"""
lowerCAmelCase__ = len(UpperCamelCase_ )
lowerCAmelCase__ = len(matrix[0] )
lowerCAmelCase__ = min(UpperCamelCase_ , UpperCamelCase_ )
for row in range(UpperCamelCase_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase_ ):
lowerCAmelCase__ = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase_ , UpperCamelCase_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
lowerCAmelCase__ = True
for i in range(row + 1 , UpperCamelCase_ ):
if matrix[i][row] != 0:
lowerCAmelCase__ , lowerCAmelCase__ = matrix[i], matrix[row]
lowerCAmelCase__ = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
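# Worked example (as comments, since the local names were rewritten by this
# corpus): for [[1, 2], [2, 4]] the second row is twice the first, so the
# elimination step turns it into [0, 0]; the resulting zero on the diagonal has
# no non-zero row below it to swap with, the rank counter drops by one, and the
# function reports rank 1.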
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""xlnet"""
a_ =["""mems"""]
a_ ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __UpperCAmelCase=32000 , __UpperCAmelCase=1024 , __UpperCAmelCase=24 , __UpperCAmelCase=16 , __UpperCAmelCase=4096 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase="bi" , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=-1 , __UpperCAmelCase=False , __UpperCAmelCase="last" , __UpperCAmelCase=True , __UpperCAmelCase="tanh" , __UpperCAmelCase=0.1 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , )-> int:
'''simple docstring'''
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_layer
lowerCAmelCase__ = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
lowerCAmelCase__ = d_model // n_head
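        # e.g. with the defaults d_model=1024 and n_head=16, each attention head
        # gets d_head = 1024 // 16 = 64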
lowerCAmelCase__ = ff_activation
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = untie_r
lowerCAmelCase__ = attn_type
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = dropout
lowerCAmelCase__ = mem_len
lowerCAmelCase__ = reuse_len
lowerCAmelCase__ = bi_data
lowerCAmelCase__ = clamp_len
lowerCAmelCase__ = same_length
lowerCAmelCase__ = summary_type
lowerCAmelCase__ = summary_use_proj
lowerCAmelCase__ = summary_activation
lowerCAmelCase__ = summary_last_dropout
lowerCAmelCase__ = start_n_top
lowerCAmelCase__ = end_n_top
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs["use_cache"]
lowerCAmelCase__ = use_mems_eval
lowerCAmelCase__ = use_mems_train
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 | 1 |
def _a ( UpperCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase__ = F"Input value of [number={number}] must be an integer"
raise TypeError(UpperCamelCase_ )
if number < 0:
return False
lowerCAmelCase__ = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
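# Worked examples (this is the classic automorphic-number check: a number whose
# square ends in the number itself): 5 -> 25, 6 -> 36, 25 -> 625 and 76 -> 5776
# all return True, while 7 -> 49 already fails on the last digit and returns False.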
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : str=False ) -> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ = ""
else:
lowerCAmelCase__ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
def _a ( UpperCamelCase_ : Dict ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = dct.pop(UpperCamelCase_ )
lowerCAmelCase__ = val
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = ViTMSNConfig()
lowerCAmelCase__ = 1_000
lowerCAmelCase__ = "datasets/huggingface/label-files"
lowerCAmelCase__ = "imagenet-1k-id2label.json"
lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ ) , "r" ) )
lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase__ = 384
lowerCAmelCase__ = 1_536
lowerCAmelCase__ = 6
elif "l16" in checkpoint_url:
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase__ = 4
elif "l7" in checkpoint_url:
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = ViTMSNModel(UpperCamelCase_ )
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="cpu" )["target_encoder"]
lowerCAmelCase__ = ViTImageProcessor(size=config.image_size )
remove_projection_head(UpperCamelCase_ )
lowerCAmelCase__ = create_rename_keys(UpperCamelCase_ , base_model=UpperCamelCase_ )
for src, dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
read_in_q_k_v(UpperCamelCase_ , UpperCamelCase_ , base_model=UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
model.eval()
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
lowerCAmelCase__ = ViTImageProcessor(
size=config.image_size , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ )
lowerCAmelCase__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase__ = model(**UpperCamelCase_ )
lowerCAmelCase__ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 340 | 1 |
import numpy
# List of input, output pairs
a_ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
a_ = (((515, 22, 13), 555), ((61, 35, 49), 150))
a_ = [2, 4, 1, 5]
a_ = len(train_data)
a_ = 0.009
def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : str="train" ) -> Optional[int]:
"""simple docstring"""
return calculate_hypothesis_value(UpperCamelCase_ , UpperCamelCase_ ) - output(
UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = 0
for i in range(len(UpperCamelCase_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str]=m ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 0
for i in range(UpperCamelCase_ ):
if index == -1:
summation_value += _error(UpperCamelCase_ )
else:
summation_value += _error(UpperCamelCase_ ) * train_data[i][0][index]
return summation_value
def _a ( UpperCamelCase_ : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = summation_of_cost_derivative(UpperCamelCase_ , UpperCamelCase_ ) / m
return cost_derivative_value
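# In formula form, the two helpers above compute the batch-gradient term
#     dJ/dtheta_j = (1 / m) * sum_i (h(x_i) - y_i) * x_i[j]
# where index -1 stands for the bias term, whose "feature" is the constant 1.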
def _a ( ) -> Dict:
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCAmelCase__ = 0.000_002
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
while True:
j += 1
lowerCAmelCase__ = [0, 0, 0, 0]
for i in range(0 , len(UpperCamelCase_ ) ):
lowerCAmelCase__ = get_cost_derivative(i - 1 )
lowerCAmelCase__ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
UpperCamelCase_ , UpperCamelCase_ , atol=UpperCamelCase_ , rtol=UpperCamelCase_ , ):
break
lowerCAmelCase__ = temp_parameter_vector
print(("Number of iterations:", j) )
def _a ( ) -> Dict:
"""simple docstring"""
for i in range(len(UpperCamelCase_ ) ):
print(("Actual output value:", output(UpperCamelCase_ , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(UpperCamelCase_ , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 340 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __UpperCAmelCase , **__UpperCAmelCase )-> int:
'''simple docstring'''
return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = {}
if "candidate_labels" in kwargs:
lowerCAmelCase__ = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
lowerCAmelCase__ = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="This is a photo of {}." )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = load_image(__UpperCAmelCase )
lowerCAmelCase__ = self.image_processor(images=[image] , return_tensors=self.framework )
lowerCAmelCase__ = candidate_labels
lowerCAmelCase__ = [hypothesis_template.format(__UpperCAmelCase ) for x in candidate_labels]
lowerCAmelCase__ = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework , padding=__UpperCAmelCase )
lowerCAmelCase__ = [text_inputs]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> int:
'''simple docstring'''
lowerCAmelCase__ = model_inputs.pop("candidate_labels" )
lowerCAmelCase__ = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , __UpperCAmelCase ):
lowerCAmelCase__ = text_inputs[0]
else:
# Batching case.
lowerCAmelCase__ = text_inputs[0][0]
lowerCAmelCase__ = self.model(**__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = model_outputs.pop("candidate_labels" )
lowerCAmelCase__ = model_outputs["logits"][0]
if self.framework == "pt":
lowerCAmelCase__ = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCAmelCase__ = probs.tolist()
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [scores]
elif self.framework == "tf":
lowerCAmelCase__ = stable_softmax(__UpperCAmelCase , axis=-1 )
lowerCAmelCase__ = probs.numpy().tolist()
else:
raise ValueError(F"Unsupported framework: {self.framework}" )
lowerCAmelCase__ = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(__UpperCAmelCase , __UpperCAmelCase ) , key=lambda __UpperCAmelCase : -x[0] )
]
return result
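# A minimal usage sketch of the pipeline above, assuming it is registered under
# the "zero-shot-image-classification" task and that the CLIP checkpoint named
# below is available on the Hub; guarded behind __main__ because it downloads a
# model.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    predictions = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["two cats", "a dog", "an airplane"],
        hypothesis_template="This is a photo of {}.",
    )
    print(predictions)  # list of {"score": ..., "label": ...} dicts, best match first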
| 340 | 1 |
from math import pi, sqrt, tan
def _a ( UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _a ( UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def _a ( UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
lowerCAmelCase__ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(UpperCamelCase_ , 2 ) * torus_radius * tube_radius
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def _a ( UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
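    # Heron's formula: area = sqrt(s * (s - a) * (s - b) * (s - c)),
    # where s = (a + b + c) / 2 is the semi-perimeter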
lowerCAmelCase__ = (sidea + sidea + sidea) / 2
lowerCAmelCase__ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def _a ( UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
    if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides" )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print('''\nSurface Areas of various geometric shapes: \n''')
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
    print(F"Regular Pentagon: {area_reg_polygon(5, 10) = }")
| 340 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class lowercase__ ( _UpperCAmelCase, unittest.TestCase ):
a_ =BartphoTokenizer
a_ =False
a_ =True
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = ["▁This", "▁is", "▁a", "▁t", "est"]
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"{token} {vocab_tokens[token]}\n" )
lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = "This is a là test"
lowerCAmelCase__ = "This is a<unk><unk> test"
return input_text, output_text
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
lowerCAmelCase__ = "This is a là test"
lowerCAmelCase__ = "▁This ▁is ▁a ▁l à ▁t est".split()
lowerCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = tokens + [tokenizer.unk_token]
lowerCAmelCase__ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
| 340 | 1 |
from collections import defaultdict
from math import gcd
def _a ( UpperCamelCase_ : int = 1_500_000 ) -> int:
"""simple docstring"""
lowerCAmelCase__ = defaultdict(UpperCamelCase_ )
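    # Euclid's formula: for coprime m > n of opposite parity, (m^2 - n^2, 2mn,
    # m^2 + n^2) is a primitive Pythagorean triple with perimeter 2m(m + n);
    # every non-primitive triple is an integer multiple of a primitive one,
    # which the inner `for perimeter in range(...)` loop accounts for.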
lowerCAmelCase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , UpperCamelCase_ , 2 ):
if gcd(UpperCamelCase_ , UpperCamelCase_ ) > 1:
continue
lowerCAmelCase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(UpperCamelCase_ , limit + 1 , UpperCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 340 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
a_ = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
a_ = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase__ ( _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ =PRETRAINED_INIT_CONFIGURATION
a_ =["""input_ids""", """attention_mask"""]
a_ =DistilBertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , )-> List[str]:
'''simple docstring'''
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
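        # Keep the backend tokenizer's normalizer in sync with the requested options:
        # rebuild it whenever lowercasing, accent stripping, or Chinese-character
        # handling differs from the serialized normalizer state.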
lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCAmelCase__ = getattr(__UpperCAmelCase , normalizer_state.pop("type" ) )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = strip_accents
lowerCAmelCase__ = tokenize_chinese_chars
lowerCAmelCase__ = normalizer_class(**__UpperCAmelCase )
lowerCAmelCase__ = do_lower_case
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]:
'''simple docstring'''
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]:
'''simple docstring'''
lowerCAmelCase__ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
| 340 | 1 |
def _a ( UpperCamelCase_ : list , UpperCamelCase_ : list ) -> float:
"""simple docstring"""
_validate_point(UpperCamelCase_ )
_validate_point(UpperCamelCase_ )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ) ) )
def _a ( UpperCamelCase_ : list[float] ) -> None:
"""simple docstring"""
if point:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
for item in point:
if not isinstance(UpperCamelCase_ , (int, float) ):
lowerCAmelCase__ = (
"Expected a list of numbers as input, found "
F"{type(UpperCamelCase_ ).__name__}"
)
raise TypeError(UpperCamelCase_ )
else:
lowerCAmelCase__ = F"Expected a list of numbers as input, found {type(UpperCamelCase_ ).__name__}"
raise TypeError(UpperCamelCase_ )
else:
raise ValueError("Missing an input" )
def _a ( UpperCamelCase_ : list , UpperCamelCase_ : list ) -> float:
"""simple docstring"""
_validate_point(UpperCamelCase_ )
_validate_point(UpperCamelCase_ )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(UpperCamelCase_ , UpperCamelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 |
a_ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 340 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
lowerCAmelCase__ = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 16000,
"return_attention_mask": False,
"do_normalize": True,
}
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
# load decoder from hub
lowerCAmelCase__ = "hf-internal-testing/ngram-beam-search-decoder"
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.add_kwargs_tokens_map.copy()
kwargs.update(__UpperCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_decoder()
lowerCAmelCase__ = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCAmelCase__ = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__UpperCAmelCase , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__UpperCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_decoder()
lowerCAmelCase__ = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ = floats_list((3, 1000) )
lowerCAmelCase__ = feature_extractor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor(__UpperCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_decoder()
lowerCAmelCase__ = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ = "This is a test string"
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self , __UpperCAmelCase=(2, 10, 16) , __UpperCAmelCase=77 )-> List[Any]:
'''simple docstring'''
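        # Seeded pseudo-random logits so the decoding tests are reproducible.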
np.random.seed(__UpperCAmelCase )
return np.random.rand(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_decoder()
lowerCAmelCase__ = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowerCAmelCase__ = processor.decode(__UpperCAmelCase )
lowerCAmelCase__ = decoder.decode_beams(__UpperCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_decoder()
lowerCAmelCase__ = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
else:
with get_context(__UpperCAmelCase ).Pool() as pool:
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = list(__UpperCAmelCase )
with get_context("fork" ).Pool() as p:
lowerCAmelCase__ = decoder.decode_beams_batch(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__UpperCAmelCase , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__UpperCAmelCase , decoded_processor.logit_score )
self.assertListEqual(__UpperCAmelCase , decoded_processor.lm_score )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_decoder()
lowerCAmelCase__ = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ = self._get_dummy_logits()
lowerCAmelCase__ = 15
lowerCAmelCase__ = -20.0
lowerCAmelCase__ = -4.0
lowerCAmelCase__ = processor.batch_decode(
__UpperCAmelCase , beam_width=__UpperCAmelCase , beam_prune_logp=__UpperCAmelCase , token_min_logp=__UpperCAmelCase , )
lowerCAmelCase__ = decoded_processor_out.text
lowerCAmelCase__ = list(__UpperCAmelCase )
with get_context("fork" ).Pool() as pool:
lowerCAmelCase__ = decoder.decode_beams_batch(
__UpperCAmelCase , __UpperCAmelCase , beam_width=__UpperCAmelCase , beam_prune_logp=__UpperCAmelCase , token_min_logp=__UpperCAmelCase , )
lowerCAmelCase__ = [d[0][0] for d in decoded_decoder_out]
lowerCAmelCase__ = [d[0][2] for d in decoded_decoder_out]
lowerCAmelCase__ = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __UpperCAmelCase )
self.assertTrue(np.array_equal(__UpperCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __UpperCAmelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(__UpperCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __UpperCAmelCase , atol=1E-3 ) )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_decoder()
lowerCAmelCase__ = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
lowerCAmelCase__ = self._get_dummy_logits()
lowerCAmelCase__ = 2.0
lowerCAmelCase__ = 5.0
lowerCAmelCase__ = -20.0
lowerCAmelCase__ = True
lowerCAmelCase__ = processor.batch_decode(
__UpperCAmelCase , alpha=__UpperCAmelCase , beta=__UpperCAmelCase , unk_score_offset=__UpperCAmelCase , lm_score_boundary=__UpperCAmelCase , )
lowerCAmelCase__ = decoded_processor_out.text
lowerCAmelCase__ = list(__UpperCAmelCase )
decoder.reset_params(
alpha=__UpperCAmelCase , beta=__UpperCAmelCase , unk_score_offset=__UpperCAmelCase , lm_score_boundary=__UpperCAmelCase , )
with get_context("fork" ).Pool() as pool:
lowerCAmelCase__ = decoder.decode_beams_batch(
__UpperCAmelCase , __UpperCAmelCase , )
lowerCAmelCase__ = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __UpperCAmelCase )
lowerCAmelCase__ = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __UpperCAmelCase )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCAmelCase__ = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase__ = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
lowerCAmelCase__ = os.listdir(__UpperCAmelCase )
lowerCAmelCase__ = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = snapshot_download("hf-internal-testing/processor_with_lm" )
lowerCAmelCase__ = WavaVecaProcessorWithLM.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = processor.decoder.model_container[processor.decoder._model_key]
lowerCAmelCase__ = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
lowerCAmelCase__ = os.listdir(__UpperCAmelCase )
lowerCAmelCase__ = os.listdir(__UpperCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCAmelCase__ = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCAmelCase__ = floats_list((3, 1000) )
lowerCAmelCase__ = processor_wavaveca(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor_auto(__UpperCAmelCase , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
lowerCAmelCase__ = self._get_dummy_logits()
lowerCAmelCase__ = processor_wavaveca.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = processor_auto.batch_decode(__UpperCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_feature_extractor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_decoder()
lowerCAmelCase__ = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCAmelCase__ = self._get_dummy_logits()[0]
lowerCAmelCase__ = processor.decode(__UpperCAmelCase , output_word_offsets=__UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCAmelCase__ = self._get_dummy_logits()
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase , output_word_offsets=__UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__UpperCAmelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
import torch
lowerCAmelCase__ = load_dataset("common_voice" , "en" , split="train" , streaming=__UpperCAmelCase )
lowerCAmelCase__ = ds.cast_column("audio" , datasets.Audio(sampling_rate=16000 ) )
lowerCAmelCase__ = iter(__UpperCAmelCase )
lowerCAmelCase__ = next(__UpperCAmelCase )
lowerCAmelCase__ = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
lowerCAmelCase__ = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowerCAmelCase__ = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
lowerCAmelCase__ = model(__UpperCAmelCase ).logits.cpu().numpy()
lowerCAmelCase__ = processor.decode(logits[0] , output_word_offsets=__UpperCAmelCase )
lowerCAmelCase__ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowerCAmelCase__ = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
lowerCAmelCase__ = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(__UpperCAmelCase , "word" ) ) , __UpperCAmelCase )
self.assertEqual(" ".join(self.get_from_offsets(__UpperCAmelCase , "word" ) ) , output.text )
# output times
lowerCAmelCase__ = torch.tensor(self.get_from_offsets(__UpperCAmelCase , "start_time" ) )
lowerCAmelCase__ = torch.tensor(self.get_from_offsets(__UpperCAmelCase , "end_time" ) )
# fmt: off
lowerCAmelCase__ = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
lowerCAmelCase__ = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=0.01 ) )
| 340 |
import collections
import importlib.util
import os
import re
from pathlib import Path
a_ = '''src/transformers'''
# Matches is_xxx_available()
a_ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
a_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
a_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
a_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
a_ = re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
a_ = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
a_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
a_ = re.compile(r'''^\s*try:''')
# Catches a line with else:
a_ = re.compile(r'''^\s*else:''')
def _a ( UpperCamelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if _re_test_backend.search(UpperCamelCase_ ) is None:
return None
lowerCAmelCase__ = [b[0] for b in _re_backend.findall(UpperCamelCase_ )]
backends.sort()
return "_and_".join(UpperCamelCase_ )
def _a ( UpperCamelCase_ : Optional[int] ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = 0
while line_index < len(UpperCamelCase_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase_ ):
lowerCAmelCase__ = _re_one_line_import_struct.search(UpperCamelCase_ ).groups()[0]
lowerCAmelCase__ = re.findall("\[([^\]]+)\]" , UpperCamelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowerCAmelCase__ = _re_import_struct_key_value.search(UpperCamelCase_ )
if single_line_import_search is not None:
lowerCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowerCAmelCase__ = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_import_struct_add_many.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_between_brackets.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_between_brackets.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_quote_object.search(UpperCamelCase_ ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ = []
while (
line_index < len(UpperCamelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
def find_duplicates(UpperCamelCase_ : str ):
return [k for k, v in collections.Counter(UpperCamelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ = []
for key in import_dict_objects.keys():
lowerCAmelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
lowerCAmelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ = "base imports" if key == "none" else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def _a ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            lowerCAmelCase__ = os.path.join(root , "__init__.py" )
            lowerCAmelCase__ = parse_init(fname )
            if objects is not None:
                lowerCAmelCase__ = analyze_results(*objects )
                if len(errors ) > 0:
                    lowerCAmelCase__ = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def _a ( ) -> str:
"""simple docstring"""
lowerCAmelCase__ = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            lowerCAmelCase__ = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            lowerCAmelCase__ = short_path.replace(os.path.sep , "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            lowerCAmelCase__ = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            lowerCAmelCase__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
return submodules
a_ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def _a ( ) -> int:
"""simple docstring"""
lowerCAmelCase__ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(UpperCamelCase_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase__ = spec.loader.load_module()
lowerCAmelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
    if len(module_not_registered ) > 0:
lowerCAmelCase__ = "\n".join(F"- {module}" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F"{list_of_modules}\n"
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 340 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
a_ = logging.get_logger(__name__)
a_ = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
a_ = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
a_ = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
a_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
a_ = OrderedDict(
[
        # Model for Image Classification mapping
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
a_ = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
a_ = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
a_ = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
a_ = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
a_ = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
a_ = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
a_ = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
a_ = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
a_ = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_MAPPING
a_ = auto_class_update(FlaxAutoModel)
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_PRETRAINING_MAPPING
a_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
a_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_MASKED_LM_MAPPING
a_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
a_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
a_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
a_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
a_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
a_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
a_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class lowercase__ ( _BaseAutoModelClass ):
a_ =FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
a_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 340 |
from __future__ import annotations
import os
from collections.abc import Mapping
a_ = tuple[int, int]
class lowercase__ :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase )-> None:
'''simple docstring'''
lowerCAmelCase__ = vertices
lowerCAmelCase__ = {
(min(__UpperCAmelCase ), max(__UpperCAmelCase )): weight for edge, weight in edges.items()
}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> None:
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCAmelCase__ = weight
def UpperCAmelCase ( self )-> Graph:
'''simple docstring'''
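        # Prim's algorithm: starting from the smallest vertex, repeatedly add the
        # minimum-weight edge that connects the growing subgraph to a new vertex.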
lowerCAmelCase__ = Graph({min(self.vertices )} , {} )
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowerCAmelCase__ = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCAmelCase__ = edge
lowerCAmelCase__ = weight
subgraph.add_edge(__UpperCAmelCase , __UpperCAmelCase )
return subgraph
def _a ( UpperCamelCase_ : str = "p107_network.txt" ) -> int:
"""simple docstring"""
lowerCAmelCase__ = os.path.abspath(os.path.dirname(UpperCamelCase_ ) )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = {}
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
with open(UpperCamelCase_ ) as f:
lowerCAmelCase__ = f.read().strip().split("\n" )
lowerCAmelCase__ = [line.split("," ) for line in data]
for edgea in range(1 , len(UpperCamelCase_ ) ):
for edgea in range(UpperCamelCase_ ):
            if adjacency_matrix[edgea][edgea] != "-":
                lowerCAmelCase__ = int(adjacency_matrix[edgea][edgea] )
lowerCAmelCase__ = Graph(set(range(len(UpperCamelCase_ ) ) ) , UpperCamelCase_ )
lowerCAmelCase__ = graph.prims_algorithm()
lowerCAmelCase__ = sum(graph.edges.values() )
lowerCAmelCase__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )-> Any:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , )
def UpperCAmelCase ( self , __UpperCAmelCase = "auto" )-> List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCAmelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCAmelCase )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
self.enable_attention_slicing(__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = 512 , __UpperCAmelCase = 512 , __UpperCAmelCase = 50 , __UpperCAmelCase = 7.5 , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = None , **__UpperCAmelCase , )-> Optional[Any]:
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = 1
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = len(__UpperCAmelCase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(__UpperCAmelCase )}." )
# get prompt text embeddings
lowerCAmelCase__ = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
lowerCAmelCase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCAmelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCAmelCase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowerCAmelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = text_embeddings.shape
lowerCAmelCase__ = text_embeddings.repeat(1 , __UpperCAmelCase , 1 )
lowerCAmelCase__ = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCAmelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase__ = 42
if negative_prompt is None:
lowerCAmelCase__ = [""]
elif type(__UpperCAmelCase ) is not type(__UpperCAmelCase ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase )} !="
F" {type(__UpperCAmelCase )}." )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [negative_prompt]
elif batch_size != len(__UpperCAmelCase ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
lowerCAmelCase__ = negative_prompt
lowerCAmelCase__ = text_input_ids.shape[-1]
lowerCAmelCase__ = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , )
lowerCAmelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase__ = uncond_embeddings.shape[1]
lowerCAmelCase__ = uncond_embeddings.repeat(__UpperCAmelCase , __UpperCAmelCase , 1 )
lowerCAmelCase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowerCAmelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCAmelCase__ = torch.randn(
__UpperCAmelCase , generator=__UpperCAmelCase , device="cpu" , dtype=__UpperCAmelCase ).to(self.device )
lowerCAmelCase__ = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device="cpu" , dtype=__UpperCAmelCase ).to(
self.device )
else:
lowerCAmelCase__ = torch.randn(
__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase )
lowerCAmelCase__ = torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCAmelCase__ = latents_reference.to(self.device )
lowerCAmelCase__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
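            # Center-align the reference latents inside the target latent grid,
            # cropping or offsetting as needed so the overlapping region matches.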
lowerCAmelCase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowerCAmelCase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowerCAmelCase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowerCAmelCase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowerCAmelCase__ = 0 if dx < 0 else dx
lowerCAmelCase__ = 0 if dy < 0 else dy
lowerCAmelCase__ = max(-dx , 0 )
lowerCAmelCase__ = max(-dy , 0 )
lowerCAmelCase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
# perform guidance
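            # noise_pred = uncond + guidance_scale * (text - uncond): a guidance_scale > 1
            # extrapolates past the unconditional prediction toward the text-conditioned one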
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
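        # 0.18215 is the Stable Diffusion VAE scaling factor; dividing by it undoes the
        # scaling that was applied when images were encoded into latent space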
        latents = 1 / 0.18_215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image ) , return_tensors="pt" ).to(
                self.device )
            image , has_nsfw_concept = self.safety_checker(
                images=image , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, has_nsfw_concept)
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=has_nsfw_concept )
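

# A minimal standalone sketch (not part of the pipeline above; numpy stands in for
# torch purely for illustration) of the seed-resize idea: the fixed-size reference
# latents are copied into the center of the target latent grid, so the same seed
# yields similar compositions at different output sizes. Only the enlarge case is shown.
def _seed_resize_sketch():
    import numpy as np

    reference = np.random.default_rng(0).standard_normal((1, 4, 64, 64))
    target = np.random.default_rng(0).standard_normal((1, 4, 96, 96))
    dx = (target.shape[3] - reference.shape[3]) // 2  # 16
    dy = (target.shape[2] - reference.shape[2]) // 2  # 16
    target[:, :, dy : dy + 64, dx : dx + 64] = reference
    return target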
| 340 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """
    Count the perimeters p <= limit for which exactly one integer-sided
    right triangle has perimeter p (Project Euler problem 75).
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
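

def _brute_force_check(limit: int = 200) -> int:
    """
    Hypothetical O(limit^2) cross-check of `solution` for small limits;
    not part of the original solution, included only as an illustration.
    """
    from math import isqrt

    counts: defaultdict = defaultdict(int)
    for a in range(1, limit):
        for b in range(a, limit):
            c_squared = a * a + b * b
            c = isqrt(c_squared)
            if c * c == c_squared and a + b + c <= limit:
                counts[a + b + c] += 1
    return sum(1 for frequency in counts.values() if frequency == 1)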
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = torch.device('''cpu''')
def _a ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
def _a ( UpperCamelCase_ : Tuple ) -> List[Any]:
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = dct.pop(UpperCamelCase_ )
lowerCAmelCase__ = val
def _a ( UpperCamelCase_ : List[str] ) -> str:
"""simple docstring"""
lowerCAmelCase__ = []
for k in state_dict.keys():
lowerCAmelCase__ = k
if ".pwconv" in k:
lowerCAmelCase__ = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
lowerCAmelCase__ = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
lowerCAmelCase__ = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
lowerCAmelCase__ = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
lowerCAmelCase__ = k_new.split("." )
if ls[2].isdigit():
lowerCAmelCase__ = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
lowerCAmelCase__ = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
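# Illustrative example (assumed key shape, not taken from a real checkpoint): a key
# such as "network.0.1.pwconv1.weight" would be rewritten by the rules above to
# "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight".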
@torch.no_grad()
def _a ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
lowerCAmelCase__ = 1_000
lowerCAmelCase__ = "huggingface/label-files"
lowerCAmelCase__ = "imagenet-1k-id2label.json"
lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="dataset" ) , "r" ) )
lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
lowerCAmelCase__ = [3, 3, 6, 4]
lowerCAmelCase__ = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
lowerCAmelCase__ = [3, 3, 9, 6]
lowerCAmelCase__ = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
lowerCAmelCase__ = [4, 3, 10, 5]
lowerCAmelCase__ = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
lowerCAmelCase__ = [4, 4, 12, 6]
lowerCAmelCase__ = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="cpu" , check_hash=UpperCamelCase_ )
else:
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )
lowerCAmelCase__ = checkpoint
lowerCAmelCase__ = create_rename_keys(UpperCamelCase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# load HuggingFace model
lowerCAmelCase__ = SwiftFormerForImageClassification(UpperCamelCase_ ).eval()
hf_model.load_state_dict(UpperCamelCase_ )
# prepare test inputs
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = ViTImageProcessor.from_pretrained("preprocessor_config" )
lowerCAmelCase__ = processor(images=UpperCamelCase_ , return_tensors="pt" )
# compare outputs from both models
lowerCAmelCase__ = get_expected_output(UpperCamelCase_ )
lowerCAmelCase__ = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase_ , atol=1e-3 )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
a_ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 340 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """tokenizer"""]
a_ ="""LayoutLMv2ImageProcessor"""
a_ =("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> Tuple:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , )-> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
lowerCAmelCase__ = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ = features["words"]
lowerCAmelCase__ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
# add pixel values
lowerCAmelCase__ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowerCAmelCase__ = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
lowerCAmelCase__ = images
return encoded_inputs
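    # Hypothetical usage sketch (checkpoint name assumed, not part of this module): with
    # apply_ocr=True the image processor supplies both words and boxes, so callers pass
    # only images:
    #     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    #     encoding = processor(images=document_image, return_tensors="pt")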
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F" {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}" )
return images_with_overflow
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Dict:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
| 340 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
a_ = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _a ( UpperCamelCase_ : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if args.student_type == "roberta":
lowerCAmelCase__ = False
elif args.student_type == "gpt2":
lowerCAmelCase__ = False
def _a ( UpperCamelCase_ : Dict , UpperCamelCase_ : str ) -> str:
"""simple docstring"""
if args.student_type == "roberta":
lowerCAmelCase__ = False
def _a ( ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser(description="Training" )
parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , )
parser.add_argument(
"--student_type" , type=UpperCamelCase_ , choices=["distilbert", "roberta", "gpt2"] , required=UpperCamelCase_ , help="The student type (DistilBERT, RoBERTa)." , )
parser.add_argument("--student_config" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights" , default=UpperCamelCase_ , type=UpperCamelCase_ , help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=UpperCamelCase_ , help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="The teacher model." )
    parser.add_argument("--temperature" , default=2.0 , type=UpperCamelCase_ , help="Temperature applied to the distillation softmax." )
parser.add_argument(
"--alpha_ce" , default=0.5 , type=UpperCamelCase_ , help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm" , default=0.0 , type=UpperCamelCase_ , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , )
parser.add_argument("--alpha_clm" , default=0.5 , type=UpperCamelCase_ , help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse" , default=0.0 , type=UpperCamelCase_ , help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos" , default=0.0 , type=UpperCamelCase_ , help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop" , default=0.15 , type=UpperCamelCase_ , help="Proportion of tokens for which we need to make a prediction." , )
parser.add_argument("--word_mask" , default=0.8 , type=UpperCamelCase_ , help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep" , default=0.1 , type=UpperCamelCase_ , help="Proportion of tokens to keep." )
parser.add_argument("--word_rand" , default=0.1 , type=UpperCamelCase_ , help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing" , default=0.7 , type=UpperCamelCase_ , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , )
parser.add_argument("--token_counts" , type=UpperCamelCase_ , help="The token counts in the data_file for MLM." )
    parser.add_argument(
        "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only on the [MLM] prediction distribution." , )
parser.add_argument(
"--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , )
parser.add_argument(
"--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , )
parser.add_argument("--n_epoch" , type=UpperCamelCase_ , default=3 , help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size" , type=UpperCamelCase_ , default=5 , help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , )
parser.add_argument(
"--gradient_accumulation_steps" , type=UpperCamelCase_ , default=50 , help="Gradient accumulation for larger training batches." , )
parser.add_argument("--warmup_prop" , default=0.05 , type=UpperCamelCase_ , help="Linear warmup proportion." )
parser.add_argument("--weight_decay" , default=0.0 , type=UpperCamelCase_ , help="Weight decay if we apply some." )
parser.add_argument("--learning_rate" , default=5e-4 , type=UpperCamelCase_ , help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon" , default=1e-6 , type=UpperCamelCase_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , default=5.0 , type=UpperCamelCase_ , help="Max gradient norm." )
parser.add_argument("--initializer_range" , default=0.02 , type=UpperCamelCase_ , help="Random initialization range." )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=UpperCamelCase_ , default="O1" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_gpu" , type=UpperCamelCase_ , default=1 , help="Number of GPUs in the node." )
parser.add_argument("--local_rank" , type=UpperCamelCase_ , default=-1 , help="Distributed training - Local rank" )
parser.add_argument("--seed" , type=UpperCamelCase_ , default=56 , help="Random seed" )
parser.add_argument("--log_interval" , type=UpperCamelCase_ , default=500 , help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval" , type=UpperCamelCase_ , default=4_000 , help="Checkpoint interval." )
lowerCAmelCase__ = parser.parse_args()
sanity_checks(UpperCamelCase_ )
# ARGS #
init_gpu_params(UpperCamelCase_ )
set_seed(UpperCamelCase_ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    F"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it." )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(F"Param: {args}" )
with open(os.path.join(args.dump_path , "parameters.json" ) , "w" ) as f:
json.dump(vars(UpperCamelCase_ ) , UpperCamelCase_ , indent=4 )
git_log(args.dump_path )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = MODEL_CLASSES[args.student_type]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowerCAmelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowerCAmelCase__ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowerCAmelCase__ = tokenizer.all_special_tokens.index(UpperCamelCase_ )
lowerCAmelCase__ = tokenizer.all_special_ids[idx]
logger.info(F"Special tokens {special_tok_ids}" )
lowerCAmelCase__ = special_tok_ids
lowerCAmelCase__ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"Loading data from {args.data_file}" )
with open(args.data_file , "rb" ) as fp:
lowerCAmelCase__ = pickle.load(UpperCamelCase_ )
if args.mlm:
logger.info(F"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , "rb" ) as fp:
lowerCAmelCase__ = pickle.load(UpperCamelCase_ )
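        # word2vec-style smoothing: raising counts to the power -mlm_smoothing (0.7 by
        # default) gives rare tokens a relatively higher masking weight than frequent ones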
lowerCAmelCase__ = np.maximum(UpperCamelCase_ , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowerCAmelCase__ = 0.0 # do not predict special tokens
lowerCAmelCase__ = torch.from_numpy(UpperCamelCase_ )
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = LmSeqsDataset(params=UpperCamelCase_ , data=UpperCamelCase_ )
logger.info("Data loader created." )
# STUDENT #
logger.info(F"Loading student config from {args.student_config}" )
lowerCAmelCase__ = student_config_class.from_pretrained(args.student_config )
lowerCAmelCase__ = True
if args.student_pretrained_weights is not None:
logger.info(F"Loading pretrained weights from {args.student_pretrained_weights}" )
lowerCAmelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=UpperCamelCase_ )
else:
lowerCAmelCase__ = student_model_class(UpperCamelCase_ )
if args.n_gpu > 0:
student.to(F"cuda:{args.local_rank}" )
logger.info("Student loaded." )
# TEACHER #
lowerCAmelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=UpperCamelCase_ )
if args.n_gpu > 0:
teacher.to(F"cuda:{args.local_rank}" )
logger.info(F"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(UpperCamelCase_ , UpperCamelCase_ )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(UpperCamelCase_ , UpperCamelCase_ )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowerCAmelCase__ = Distiller(
params=UpperCamelCase_ , dataset=UpperCamelCase_ , token_probs=UpperCamelCase_ , student=UpperCamelCase_ , teacher=UpperCamelCase_ )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
| 340 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
lowerCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 340 | 1 |
def is_palindrome(num: int) -> bool:
    """
    Return True if `num` reads the same forwards and backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(-121)
    False
    >>> is_palindrome(10)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Return both roots of a*x**2 + b*x + c = 0; purely real roots are returned as floats.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
)
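

# Hypothetical quick check (not in the original module): a negative discriminant
# yields a complex-conjugate pair, e.g. x**2 + 2*x + 5 = 0:
#     quadratic_roots(a=1, b=2, c=5)  # -> ((-1+2j), (-1-2j))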
def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(F"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
| 340 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
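

# A minimal sketch of the lazy-import pattern used above (assumption: greatly
# simplified relative to the real `_LazyModule` in transformers.utils): the heavy
# module is imported only when one of its attributes is first accessed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # e.g. {"sqrt": "math"}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# e.g. _TinyLazyModule("shim", {"sqrt": "math"}).sqrt(4.0) -> 2.0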
| 340 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """
    Build an n-qubit quantum Fourier transform circuit and return the
    measurement counts from a 10,000-shot simulation.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
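

# For the default |000> input, the QFT produces an equal superposition, so the
# simulated counts should be roughly uniform over all 2**n basis states
# (about 1250 per state for n = 3 and 10,000 shots).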
if __name__ == "__main__":
    print(F"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}")
| 340 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
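

# e.g. softmax(np.array([[2.0, 1.0, 0.1]])) -> approximately [[0.659, 0.242, 0.099]];
# subtracting the row max only improves numerical stability and cancels in the ratio.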
class lowercase__ ( _UpperCAmelCase ):
a_ ="""sigmoid"""
a_ ="""softmax"""
a_ ="""none"""
@add_end_docstrings(
_UpperCAmelCase, r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""", )
class lowercase__ ( _UpperCAmelCase ):
a_ =False
a_ =ClassificationFunction.NONE
def __init__( self , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="" , **__UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = tokenizer_kwargs
lowerCAmelCase__ = {}
if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None:
lowerCAmelCase__ = self.model.config.return_all_scores
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or top_k is None:
lowerCAmelCase__ = top_k
lowerCAmelCase__ = False
elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated. If you want similar functionality, use `top_k=None` instead of"
                " `return_all_scores=True`, or `top_k=1` instead of `return_all_scores=False`." , __UpperCAmelCase , )
if return_all_scores:
lowerCAmelCase__ = None
else:
lowerCAmelCase__ = 1
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
lowerCAmelCase__ = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
lowerCAmelCase__ = "top_k" not in kwargs
        if isinstance(args[0] , str ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def UpperCAmelCase ( self , __UpperCAmelCase , **__UpperCAmelCase )-> Dict[str, GenericTensor]:
'''simple docstring'''
lowerCAmelCase__ = self.framework
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return self.tokenizer(**__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1 and isinstance(inputs[0] , __UpperCAmelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> List[str]:
'''simple docstring'''
return self.model(**__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase=True )-> Dict:
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
lowerCAmelCase__ = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
lowerCAmelCase__ = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None:
lowerCAmelCase__ = self.model.config.function_to_apply
else:
lowerCAmelCase__ = ClassificationFunction.NONE
lowerCAmelCase__ = model_outputs["logits"][0]
lowerCAmelCase__ = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
lowerCAmelCase__ = sigmoid(__UpperCAmelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
lowerCAmelCase__ = softmax(__UpperCAmelCase )
elif function_to_apply == ClassificationFunction.NONE:
lowerCAmelCase__ = outputs
else:
raise ValueError(F"Unrecognized `function_to_apply` argument: {function_to_apply}" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
lowerCAmelCase__ = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__UpperCAmelCase )
]
if not _legacy:
            dict_scores.sort(key=lambda x : x["score"] , reverse=True )
if top_k is not None:
lowerCAmelCase__ = dict_scores[:top_k]
return dict_scores
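

# Hypothetical end-to-end usage (the default model and the scores are illustrative):
#     from transformers import pipeline
#     classifier = pipeline("text-classification")
#     classifier("I love this!", top_k=None)
#     # -> [{"label": "POSITIVE", "score": 0.99...}, {"label": "NEGATIVE", "score": 0.00...}]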
| 340 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
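        # index 0 of each prediction holds the start token, so it is dropped below;
        # decoding stops at the first `eos_str` in the text, and the confidence is the
        # cumulative product of per-step max probabilities up to `eos_token`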
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
| 340 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
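# Hedged usage sketch, assuming the processor above corresponds to transformers'
# MgpstrProcessor; the checkpoint name and image URL are illustrative.
import requests
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
outputs = model(pixel_values)
# batch_decode arbitrates between the char / bpe / wordpiece heads per sample
print(processor.batch_decode(outputs.logits)["generated_text"])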
| 340 |
from collections import defaultdict
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
for v in tree[start]:
if v not in visited:
ret += dfs(UpperCamelCase_ )
if ret % 2 == 0:
cuts.append(UpperCamelCase_ )
return ret
def _a ( ) -> Optional[Any]:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
a_, a_ = 10, 9
a_ = defaultdict(list)
a_ = {}
a_ = []
a_ = 0
a_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
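# A readable sketch of the recursion above: the edge to `parent` can be cut exactly
# when the subtree hanging below `node` has an even number of vertices, so one DFS
# that returns subtree sizes finds every removable edge. The root's "subtree" is the
# whole (even-sized) tree, which is why the driver prints len(cuts) - 1.
def subtree_size(node: int, parent: int, adjacency: dict, cuts: list) -> int:
    size = 1
    for child in adjacency[node]:
        if child != parent:
            size += subtree_size(child, node, adjacency, cuts)
    if size % 2 == 0:
        cuts.append(node)  # the edge (parent, node) may be removed
    return size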
| 340 | 1 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
a_ = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class lowercase__ :
a_ =42
a_ =None
a_ =None
a_ =None
a_ =None
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _str_to_version_tuple(self.version_str )
def __repr__( self )-> Optional[Any]:
'''simple docstring'''
return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
@property
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
return self.major, self.minor, self.patch
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return Version(__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return other
raise TypeError(F"{other} (type {type(__UpperCAmelCase )}) cannot be compared to version." )
def __eq__( self , __UpperCAmelCase )-> str:
'''simple docstring'''
try:
lowerCAmelCase__ = self._validate_operand(__UpperCAmelCase )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self._validate_operand(__UpperCAmelCase )
return self.tuple < other.tuple
def __hash__( self )-> List[str]:
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def UpperCAmelCase ( cls , __UpperCAmelCase )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return self.version_str
def _a ( UpperCamelCase_ : Any ) -> int:
"""simple docstring"""
lowerCAmelCase__ = _VERSION_REG.match(UpperCamelCase_ )
if not res:
raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
return tuple(int(UpperCamelCase_ ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] )
def _a ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return ".".join(str(UpperCamelCase_ ) for v in version_tuple )
| 340 |
import requests
from bs4 import BeautifulSoup
def _a ( UpperCamelCase_ : str = "AAPL" ) -> str:
"""simple docstring"""
lowerCAmelCase__ = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
lowerCAmelCase__ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
lowerCAmelCase__ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 1 |
import math
from datetime import datetime, timedelta
def _a ( UpperCamelCase_ : int ) -> datetime:
"""simple docstring"""
lowerCAmelCase__ = year % 19
lowerCAmelCase__ = year % 4
lowerCAmelCase__ = year % 7
lowerCAmelCase__ = math.floor(year / 100 )
lowerCAmelCase__ = math.floor((13 + 8 * leap_day_inhibits) / 25 )
lowerCAmelCase__ = leap_day_inhibits / 4
lowerCAmelCase__ = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
lowerCAmelCase__ = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
lowerCAmelCase__ = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
lowerCAmelCase__ = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(UpperCamelCase_ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(UpperCamelCase_ , 4 , 18 )
else:
return datetime(UpperCamelCase_ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
a_ = '''will be''' if year > datetime.now().year else '''was'''
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 340 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
a_ = (3, 9, -11, 0, 7, 5, 1, -1)
a_ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class lowercase__ :
a_ =42
a_ =42
class lowercase__ :
def __init__( self , __UpperCAmelCase )-> None:
'''simple docstring'''
lowerCAmelCase__ = None
for i in sorted(__UpperCAmelCase , reverse=__UpperCAmelCase ):
lowerCAmelCase__ = Node(__UpperCAmelCase , self.head )
def __iter__( self )-> Iterator[int]:
'''simple docstring'''
lowerCAmelCase__ = self.head
while node:
yield node.data
lowerCAmelCase__ = node.next_node
def __len__( self )-> int:
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self )-> str:
'''simple docstring'''
return " -> ".join([str(__UpperCAmelCase ) for node in self] )
def _a ( UpperCamelCase_ : SortedLinkedList , UpperCamelCase_ : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(UpperCamelCase_ ) + list(UpperCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
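# For plain iterables that are already sorted, the standard library produces the
# same merged ascending sequence lazily in O(n + m), without node objects (sketch):
import heapq

merged = list(heapq.merge(sorted(test_data_odd), sorted(test_data_even)))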
| 340 | 1 |
from __future__ import annotations
a_ = 10
def _a ( UpperCamelCase_ : list[int] ) -> list[int]:
"""simple docstring"""
lowerCAmelCase__ = 1
lowerCAmelCase__ = max(UpperCamelCase_ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCAmelCase__ = [[] for _ in range(UpperCamelCase_ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCAmelCase__ = int((i / placement) % RADIX )
buckets[tmp].append(UpperCamelCase_ )
# put each buckets' contents into list_of_ints
lowerCAmelCase__ = 0
for b in range(UpperCamelCase_ ):
for i in buckets[b]:
lowerCAmelCase__ = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
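# A compact sketch of the same LSD radix sort. It handles non-empty lists of
# non-negative integers only, because digit extraction assumes values >= 0.
def radix_sort_sketch(values: list) -> list:
    placement = 1
    while placement <= max(values):
        buckets = [[] for _ in range(10)]
        for value in values:
            buckets[(value // placement) % 10].append(value)
        values = [value for bucket in buckets for value in bucket]
        placement *= 10
    return values

assert radix_sort_sketch([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]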
| 340 |
import d4rl # noqa -- importing registers the D4RL offline-RL environments (e.g. hopper-medium-v2) with gym
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
a_ = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
a_ = '''hopper-medium-v2'''
a_ = gym.make(env_name)
a_ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
a_ = env.reset()
a_ = 0
a_ = 0
a_ = 1000
a_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a_ = pipeline(obs, planning_horizon=32)
# execute action in environment
a_, a_, a_, a_ = env.step(denorm_actions)
a_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
a_ = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
a_ = '''src/transformers'''
a_ = '''docs/source/en/tasks'''
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
# Find the start prompt.
lowerCAmelCase__ = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase__ = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(TRANSFORMERS_PATH)
a_ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a_ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def _a ( UpperCamelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = TASK_GUIDE_TO_MODELS[task_guide]
lowerCAmelCase__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase_ , set() )
lowerCAmelCase__ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=False ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _find_text_in_file(
filename=os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
lowerCAmelCase__ = get_model_list_for_task(UpperCamelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
a_ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
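# Typical invocations (from the repo root, per the comment and argparse above):
#   python utils/check_task_guides.py                      # check only, raise on drift
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the task guides in place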
| 340 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class lowercase__ ( nn.Module ):
a_ =42
a_ =jnp.floataa
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = hidden_states.shape
lowerCAmelCase__ = jax.image.resize(
__UpperCAmelCase , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
lowerCAmelCase__ = self.conv(__UpperCAmelCase )
return hidden_states
class lowercase__ ( nn.Module ):
a_ =42
a_ =jnp.floataa
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = self.conv(__UpperCAmelCase )
return hidden_states
class lowercase__ ( nn.Module ):
a_ =42
a_ =None
a_ =0.0
a_ =None
a_ =jnp.floataa
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.in_channels if self.out_channels is None else self.out_channels
lowerCAmelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowerCAmelCase__ = nn.Conv(
__UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase__ = nn.Dense(__UpperCAmelCase , dtype=self.dtype )
lowerCAmelCase__ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowerCAmelCase__ = nn.Dropout(self.dropout_prob )
lowerCAmelCase__ = nn.Conv(
__UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase__ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowerCAmelCase__ = None
if use_nin_shortcut:
lowerCAmelCase__ = nn.Conv(
__UpperCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = hidden_states
lowerCAmelCase__ = self.norma(__UpperCAmelCase )
lowerCAmelCase__ = nn.swish(__UpperCAmelCase )
lowerCAmelCase__ = self.conva(__UpperCAmelCase )
lowerCAmelCase__ = self.time_emb_proj(nn.swish(__UpperCAmelCase ) )
lowerCAmelCase__ = jnp.expand_dims(jnp.expand_dims(__UpperCAmelCase , 1 ) , 1 )
lowerCAmelCase__ = hidden_states + temb
lowerCAmelCase__ = self.norma(__UpperCAmelCase )
lowerCAmelCase__ = nn.swish(__UpperCAmelCase )
lowerCAmelCase__ = self.dropout(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = self.conva(__UpperCAmelCase )
if self.conv_shortcut is not None:
lowerCAmelCase__ = self.conv_shortcut(__UpperCAmelCase )
return hidden_states + residual
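# The first block above doubles spatial resolution with nearest-neighbour resizing
# before its 3x3 conv; a standalone sketch of just that resize step (NHWC layout):
import jax.numpy as jnp
from jax import image

x = jnp.ones((1, 8, 8, 3))  # (batch, height, width, channels)
up = image.resize(x, shape=(1, 16, 16, 3), method="nearest")
assert up.shape == (1, 16, 16, 3)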
| 340 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
lowerCAmelCase__ = TOKENIZER_CLASSES
else:
lowerCAmelCase__ = {tokenizer_name: getattr(UpperCamelCase_ , tokenizer_name + "Fast" )}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
lowerCAmelCase__ = TOKENIZER_CLASSES[tokenizer_name]
lowerCAmelCase__ = True
if checkpoint_name is None:
lowerCAmelCase__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCAmelCase__ = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
lowerCAmelCase__ = tokenizer_class.from_pretrained(UpperCamelCase_ , force_download=UpperCamelCase_ )
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCAmelCase__ , lowerCAmelCase__ = checkpoint.split("/" )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
elif add_prefix:
lowerCAmelCase__ = checkpoint
lowerCAmelCase__ = dump_path
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCAmelCase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCAmelCase__ = file_path.split(UpperCamelCase_ )[-1][0]
if next_char == "/":
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
lowerCAmelCase__ = tokenizer.save_pretrained(
UpperCamelCase_ , legacy_format=UpperCamelCase_ , filename_prefix=UpperCamelCase_ )
logger.info(F"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(UpperCamelCase_ )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
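# Example invocations, per the argparse definition above (the script filename is
# the conventional one in the transformers repo and is an assumption here):
#   python convert_slow_tokenizers_checkpoints_to_fast.py --dump_path ./fast_tokenizers
#   python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers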
| 340 | 1 |
a_ = {}
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> int:
"""simple docstring"""
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowerCAmelCase__ = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowerCAmelCase__ = _calculate(days - 1 , UpperCamelCase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowerCAmelCase__ = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowerCAmelCase__ = _calculate(days - 1 , UpperCamelCase_ , 0 )
lowerCAmelCase__ = state_late + state_absent + state_ontime
lowerCAmelCase__ = prizestrings
return prizestrings
def _a ( UpperCamelCase_ : int = 30 ) -> int:
"""simple docstring"""
return _calculate(UpperCamelCase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
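# Cross-check (sketch): enumerate all length-n strings over O/A/L and count those
# with fewer than two absences and no three consecutive lates; this must agree with
# the memoised recursion above (called `_calculate` at its call sites) for small n.
from itertools import product

def brute_force_prize_strings(days: int) -> int:
    return sum(
        1
        for s in product("OAL", repeat=days)
        if s.count("A") < 2 and "LLL" not in "".join(s)
    )

assert brute_force_prize_strings(4) == _calculate(4, absent=0, late=0)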
| 340 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _a ( UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : List[str]=1_024 , UpperCamelCase_ : Dict=1_024 , UpperCamelCase_ : List[str]=False , **UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = AutoTokenizer.from_pretrained(UpperCamelCase_ )
lowerCAmelCase__ = SeqaSeqDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , type_path="train" , **UpperCamelCase_ )
lowerCAmelCase__ = tok.pad_token_id
def get_lens(UpperCamelCase_ : str ):
lowerCAmelCase__ = tqdm(
DataLoader(UpperCamelCase_ , batch_size=512 , num_workers=8 , shuffle=UpperCamelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
lowerCAmelCase__ = []
for batch in dl:
lowerCAmelCase__ = batch["input_ids"].ne(UpperCamelCase_ ).sum(1 ).tolist()
lowerCAmelCase__ = batch["labels"].ne(UpperCamelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(UpperCamelCase_ , UpperCamelCase_ ):
max_lens.append(max(UpperCamelCase_ , UpperCamelCase_ ) )
else:
max_lens.extend(UpperCamelCase_ )
return max_lens
lowerCAmelCase__ = get_lens(UpperCamelCase_ )
lowerCAmelCase__ = SeqaSeqDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , type_path="val" , **UpperCamelCase_ )
lowerCAmelCase__ = get_lens(UpperCamelCase_ )
pickle_save(UpperCamelCase_ , train_ds.len_file )
pickle_save(UpperCamelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
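# Example invocation through python-fire (sketch; the script name and data_dir are
# illustrative, and data_dir must contain the train/val files SeqaSeqDataset expects):
#   python save_len_file.py --tokenizer_name facebook/bart-large --data_dir ./cnn_dm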
| 340 | 1 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _a ( UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = {}
lowerCAmelCase__ = tokenizer(example["content"] , truncation=UpperCamelCase_ )["input_ids"]
lowerCAmelCase__ = len(example["content"] ) / len(output["input_ids"] )
return output
a_ = HfArgumentParser(PretokenizationArguments)
a_ = parser.parse_args()
if args.num_workers is None:
a_ = multiprocessing.cpu_count()
a_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a_ = time.time()
a_ = load_dataset(args.dataset_name, split='''train''')
print(F"Dataset loaded in {time.time()-t_start:.2f}s")
a_ = time.time()
a_ = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"Dataset tokenized in {time.time()-t_start:.2f}s")
a_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 340 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""xlnet"""
a_ =["""mems"""]
a_ ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , __UpperCAmelCase=32000 , __UpperCAmelCase=1024 , __UpperCAmelCase=24 , __UpperCAmelCase=16 , __UpperCAmelCase=4096 , __UpperCAmelCase="gelu" , __UpperCAmelCase=True , __UpperCAmelCase="bi" , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=-1 , __UpperCAmelCase=False , __UpperCAmelCase="last" , __UpperCAmelCase=True , __UpperCAmelCase="tanh" , __UpperCAmelCase=0.1 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=5 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , )-> int:
'''simple docstring'''
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_layer
lowerCAmelCase__ = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
lowerCAmelCase__ = d_model // n_head
lowerCAmelCase__ = ff_activation
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = untie_r
lowerCAmelCase__ = attn_type
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = dropout
lowerCAmelCase__ = mem_len
lowerCAmelCase__ = reuse_len
lowerCAmelCase__ = bi_data
lowerCAmelCase__ = clamp_len
lowerCAmelCase__ = same_length
lowerCAmelCase__ = summary_type
lowerCAmelCase__ = summary_use_proj
lowerCAmelCase__ = summary_activation
lowerCAmelCase__ = summary_last_dropout
lowerCAmelCase__ = start_n_top
lowerCAmelCase__ = end_n_top
lowerCAmelCase__ = bos_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs["use_cache"]
lowerCAmelCase__ = use_mems_eval
lowerCAmelCase__ = use_mems_train
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowercase__ :
a_ =XGLMConfig
a_ ={}
a_ ="""gelu"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.02 , )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = ffn_dim
lowerCAmelCase__ = activation_function
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = None
lowerCAmelCase__ = 0
lowerCAmelCase__ = 2
lowerCAmelCase__ = 1
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = self.get_config()
lowerCAmelCase__ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class lowercase__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase ):
a_ =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
a_ =(TFXGLMForCausalLM,) if is_tf_available() else ()
a_ =(
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
a_ =False
a_ =False
a_ =False
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = TFXGLMModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFXGLMModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCAmelCase ( self , __UpperCAmelCase=True )-> Any:
'''simple docstring'''
lowerCAmelCase__ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
lowerCAmelCase__ = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowerCAmelCase__ = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
lowerCAmelCase__ = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
lowerCAmelCase__ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
lowerCAmelCase__ = tokenizer("Today is a nice day and" , return_tensors="tf" )
lowerCAmelCase__ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
lowerCAmelCase__ = model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] )
lowerCAmelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
lowerCAmelCase__ = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
lowerCAmelCase__ = "left"
# use different length sentences to test batching
lowerCAmelCase__ = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="tf" , padding=__UpperCAmelCase )
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
lowerCAmelCase__ = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
lowerCAmelCase__ = model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
lowerCAmelCase__ = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
lowerCAmelCase__ = model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
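# To exercise this module locally (sketch; the path follows the usual transformers
# repo layout and may differ):
#   python -m pytest tests/models/xglm/test_modeling_tf_xglm.py
#   RUN_SLOW=1 python -m pytest tests/models/xglm/test_modeling_tf_xglm.py  # include the @slow tests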
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : str=False ) -> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ = ""
else:
lowerCAmelCase__ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
def _a ( UpperCamelCase_ : Dict ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = dct.pop(UpperCamelCase_ )
lowerCAmelCase__ = val
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = ViTMSNConfig()
lowerCAmelCase__ = 1_000
lowerCAmelCase__ = "datasets/huggingface/label-files"
lowerCAmelCase__ = "imagenet-1k-id2label.json"
lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ ) , "r" ) )
lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase__ = 384
lowerCAmelCase__ = 1_536
lowerCAmelCase__ = 6
elif "l16" in checkpoint_url:
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase__ = 4
elif "l7" in checkpoint_url:
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = ViTMSNModel(UpperCamelCase_ )
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="cpu" )["target_encoder"]
lowerCAmelCase__ = ViTImageProcessor(size=config.image_size )
remove_projection_head(UpperCamelCase_ )
lowerCAmelCase__ = create_rename_keys(UpperCamelCase_ , base_model=UpperCamelCase_ )
for src, dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
read_in_q_k_v(UpperCamelCase_ , UpperCamelCase_ , base_model=UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
model.eval()
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
lowerCAmelCase__ = ViTImageProcessor(
size=config.image_size , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ )
lowerCAmelCase__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase__ = model(**UpperCamelCase_ )
lowerCAmelCase__ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
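# Example invocation, per the argparse definition above (the script filename is an
# assumption; the checkpoint URL is the script's own default):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small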
| 340 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a_ = logging.get_logger(__name__)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""input_values""", """padding_mask"""]
def __init__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 24000 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , )-> List[str]:
'''simple docstring'''
super().__init__(feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ = chunk_length_s
lowerCAmelCase__ = overlap
@property
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , )-> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
lowerCAmelCase__ = True
lowerCAmelCase__ = bool(
isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
lowerCAmelCase__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
lowerCAmelCase__ = np.asarray(__UpperCAmelCase , dtype=np.floataa )
elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowerCAmelCase__ = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase__ = [np.asarray(__UpperCAmelCase ).T]
# verify inputs are valid
for idx, example in enumerate(__UpperCAmelCase ):
if example.ndim > 2:
raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels" )
lowerCAmelCase__ = None
lowerCAmelCase__ = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowerCAmelCase__ = min(array.shape[0] for array in raw_audio )
lowerCAmelCase__ = int(np.floor(max_length / self.chunk_stride ) )
lowerCAmelCase__ = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowerCAmelCase__ = max(array.shape[0] for array in raw_audio )
lowerCAmelCase__ = int(np.ceil(max_length / self.chunk_stride ) )
lowerCAmelCase__ = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowerCAmelCase__ = "max_length"
else:
lowerCAmelCase__ = input_values
# normal padding on batch
if padded_inputs is None:
lowerCAmelCase__ = self.pad(
__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , padding=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
if padding:
lowerCAmelCase__ = padded_inputs.pop("attention_mask" )
lowerCAmelCase__ = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
lowerCAmelCase__ = example[..., None]
input_values.append(example.T )
lowerCAmelCase__ = input_values
if return_tensors is not None:
lowerCAmelCase__ = padded_inputs.convert_to_tensors(__UpperCAmelCase )
return padded_inputs
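# Hedged usage sketch, assuming the extractor above is transformers'
# EncodecFeatureExtractor in its 24 kHz mono configuration:
import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
audio = np.zeros(24000, dtype=np.float32)  # one second of mono silence
inputs = extractor(audio, sampling_rate=24000, return_tensors="np")
print(inputs["input_values"].shape)  # (batch, channels, samples) -> (1, 1, 24000)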
| 340 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __UpperCAmelCase , **__UpperCAmelCase )-> int:
'''simple docstring'''
return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = {}
if "candidate_labels" in kwargs:
lowerCAmelCase__ = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
lowerCAmelCase__ = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="This is a photo of {}." )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = load_image(__UpperCAmelCase )
lowerCAmelCase__ = self.image_processor(images=[image] , return_tensors=self.framework )
lowerCAmelCase__ = candidate_labels
lowerCAmelCase__ = [hypothesis_template.format(__UpperCAmelCase ) for x in candidate_labels]
lowerCAmelCase__ = self.tokenizer(__UpperCAmelCase , return_tensors=self.framework , padding=__UpperCAmelCase )
lowerCAmelCase__ = [text_inputs]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> int:
'''simple docstring'''
lowerCAmelCase__ = model_inputs.pop("candidate_labels" )
lowerCAmelCase__ = model_inputs.pop("text_inputs" )
if isinstance(text_inputs[0] , __UpperCAmelCase ):
lowerCAmelCase__ = text_inputs[0]
else:
# Batching case.
lowerCAmelCase__ = text_inputs[0][0]
lowerCAmelCase__ = self.model(**__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = model_outputs.pop("candidate_labels" )
lowerCAmelCase__ = model_outputs["logits"][0]
if self.framework == "pt":
lowerCAmelCase__ = logits.softmax(dim=-1 ).squeeze(-1 )
lowerCAmelCase__ = probs.tolist()
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [scores]
elif self.framework == "tf":
lowerCAmelCase__ = stable_softmax(__UpperCAmelCase , axis=-1 )
lowerCAmelCase__ = probs.numpy().tolist()
else:
raise ValueError(F"Unsupported framework: {self.framework}" )
lowerCAmelCase__ = [
{"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(__UpperCAmelCase , __UpperCAmelCase ) , key=lambda x : -x[0] )
]
return result
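
# Hedged usage sketch, separate from the class above: the usual entry point is
# `transformers.pipeline`. The CLIP checkpoint below is an example choice, not
# a requirement, and downloading it needs network access; the image path is a
# placeholder (a PIL.Image or URL also works).
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "path/to/image.jpg",
    candidate_labels=["cat", "dog", "car"],
    hypothesis_template="This is a photo of {}.",
)
# -> [{"score": ..., "label": ...}, ...] sorted by descending score, as built in postprocess above.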
| 340 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def _a ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=7 ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = None
if token is not None:
lowerCAmelCase__ = {"Accept": "application/vnd.github+json", "Authorization": F"Bearer {token}"}
# The id of a workflow (not of a workflow run)
lowerCAmelCase__ = "636036"
lowerCAmelCase__ = F"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
lowerCAmelCase__ = requests.get(UpperCamelCase_ , headers=UpperCamelCase_ ).json()
return result["workflow_runs"]
def _a ( UpperCamelCase_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = get_daily_ci_runs(UpperCamelCase_ )
lowerCAmelCase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowerCAmelCase__ = workflow_run["id"]
break
return workflow_run_id
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = get_last_daily_ci_runs(UpperCamelCase_ )
if workflow_run_id is not None:
lowerCAmelCase__ = get_artifacts_links(worflow_run_id=UpperCamelCase_ , token=UpperCamelCase_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowerCAmelCase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=UpperCamelCase_ , artifact_url=UpperCamelCase_ , output_dir=UpperCamelCase_ , token=UpperCamelCase_ )
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
get_last_daily_ci_artifacts(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = {}
for artifact_name in artifact_names:
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , F"{artifact_name}.zip" )
if os.path.isfile(UpperCamelCase_ ):
lowerCAmelCase__ = {}
with zipfile.ZipFile(UpperCamelCase_ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCamelCase_ ):
# read the file
with z.open(UpperCamelCase_ ) as f:
lowerCAmelCase__ = f.read().decode("UTF-8" )
return results
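
# Standalone sketch of the same GitHub Actions query the helpers above perform.
# `TOKEN` is a placeholder; the workflow id and query string are taken from
# get_daily_ci_runs above.
import requests

TOKEN = "ghp_..."  # placeholder personal access token
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {TOKEN}"}
url = "https://api.github.com/repos/huggingface/transformers/actions/workflows/636036/runs"
url += "?branch=main&event=schedule&exclude_pull_requests=true&per_page=7"
runs = requests.get(url, headers=headers).json()["workflow_runs"]
last_completed = next((run["id"] for run in runs if run["status"] == "completed"), None)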
| 340 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class lowercase__ ( _UpperCAmelCase, unittest.TestCase ):
a_ =BartphoTokenizer
a_ =False
a_ =True
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = ["▁This", "▁is", "▁a", "▁t", "est"]
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"{token} {vocab_tokens[token]}\n" )
lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = "This is a là test"
lowerCAmelCase__ = "This is a<unk><unk> test"
return input_text, output_text
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
lowerCAmelCase__ = "This is a là test"
lowerCAmelCase__ = "▁This ▁is ▁a ▁l à ▁t est".split()
lowerCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = tokens + [tokenizer.unk_token]
lowerCAmelCase__ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
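
# Sketch of the monolingual vocab file format the setUp above writes: one
# "<token> <id>" pair per line. The file name and contents are illustrative.
vocab = {"▁This": 0, "▁is": 1, "▁a": 2, "▁t": 3, "est": 4}
with open("monolingual_vocab.txt", "w", encoding="utf-8") as fp:
    for token, idx in vocab.items():
        fp.write(f"{token} {idx}\n")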
| 340 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def _a ( UpperCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCAmelCase__ = [144, 192, 240]
lowerCAmelCase__ = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCAmelCase__ = [96, 120, 144]
lowerCAmelCase__ = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCAmelCase__ = [64, 80, 96]
lowerCAmelCase__ = [16, 16, 24, 48, 64, 80, 320]
lowerCAmelCase__ = 0.05
lowerCAmelCase__ = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
lowerCAmelCase__ = 512
lowerCAmelCase__ = 16
lowerCAmelCase__ = 21
lowerCAmelCase__ = "pascal-voc-id2label.json"
else:
lowerCAmelCase__ = 1_000
lowerCAmelCase__ = "imagenet-1k-id2label.json"
lowerCAmelCase__ = "huggingface/label-files"
lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="dataset" ) , "r" ) )
lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
return config
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int=False ) -> List[str]:
"""simple docstring"""
for i in range(1 , 6 ):
if F"layer_{i}." in name:
lowerCAmelCase__ = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
if "conv_1." in name:
lowerCAmelCase__ = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
lowerCAmelCase__ = name.replace(".block." , "." )
if "exp_1x1" in name:
lowerCAmelCase__ = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
lowerCAmelCase__ = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
lowerCAmelCase__ = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
lowerCAmelCase__ = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
lowerCAmelCase__ = name.replace(".norm." , ".normalization." )
if ".conv." in name:
lowerCAmelCase__ = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
lowerCAmelCase__ = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
lowerCAmelCase__ = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
lowerCAmelCase__ = name.replace(F".{i}.{j}." , F".{i}." )
if "expand_1x1" in name:
lowerCAmelCase__ = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
lowerCAmelCase__ = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
lowerCAmelCase__ = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if F".global_rep.{i}.weight" in name:
lowerCAmelCase__ = name.replace(F".global_rep.{i}.weight" , ".layernorm.weight" )
if F".global_rep.{i}.bias" in name:
lowerCAmelCase__ = name.replace(F".global_rep.{i}.bias" , ".layernorm.bias" )
if ".global_rep." in name:
lowerCAmelCase__ = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
lowerCAmelCase__ = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
lowerCAmelCase__ = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
lowerCAmelCase__ = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
lowerCAmelCase__ = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
lowerCAmelCase__ = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
lowerCAmelCase__ = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
lowerCAmelCase__ = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
lowerCAmelCase__ = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
lowerCAmelCase__ = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
lowerCAmelCase__ = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
lowerCAmelCase__ = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
lowerCAmelCase__ = "mobilevit." + name
return name
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Dict=False ) -> Dict:
"""simple docstring"""
if base_model:
lowerCAmelCase__ = ""
else:
lowerCAmelCase__ = "mobilevit."
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if key[:8] == "encoder.":
lowerCAmelCase__ = key[8:]
if "qkv" in key:
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[0][6:] ) - 1
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
lowerCAmelCase__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCAmelCase__ = (
F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = val
return orig_state_dict
def _a ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : str=False ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = get_mobilevit_config(UpperCamelCase_ )
# load original state_dict
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
lowerCAmelCase__ = MobileViTForSemanticSegmentation(UpperCamelCase_ ).eval()
else:
lowerCAmelCase__ = MobileViTForImageClassification(UpperCamelCase_ ).eval()
lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCAmelCase__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCAmelCase__ = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCAmelCase__ = model(**UpperCamelCase_ )
lowerCAmelCase__ = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCAmelCase__ = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCAmelCase__ = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCAmelCase__ = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
lowerCAmelCase__ = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowerCAmelCase__ = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowerCAmelCase__ = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1e-4 )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
lowerCAmelCase__ = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
lowerCAmelCase__ = model_mapping[mobilevit_name]
image_processor.push_to_hub(UpperCamelCase_ , organization="apple" )
model.push_to_hub(UpperCamelCase_ , organization="apple" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a_ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
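
# Sketch of the qkv-splitting pattern used in convert_state_dict above: a fused
# (3*dim, dim) projection is sliced into query/key/value rows. Shapes are toy
# values, not MobileViT's.
import torch

def split_qkv(state_dict, dim):
    out = {}
    for key, val in state_dict.items():
        if "qkv.weight" in key:
            prefix = key.replace("qkv.weight", "")
            out[prefix + "query.weight"] = val[:dim, :]
            out[prefix + "key.weight"] = val[dim : dim * 2, :]
            out[prefix + "value.weight"] = val[-dim:, :]
        else:
            out[key] = val
    return out

toy = {"attention.qkv.weight": torch.randn(3 * 8, 8)}
print(sorted(split_qkv(toy, dim=8)))  # ['attention.key.weight', 'attention.query.weight', 'attention.value.weight']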
| 340 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
a_ = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
a_ = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase__ ( _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ =PRETRAINED_INIT_CONFIGURATION
a_ =["""input_ids""", """attention_mask"""]
a_ =DistilBertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , )-> List[str]:
'''simple docstring'''
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCAmelCase__ = getattr(__UpperCAmelCase , normalizer_state.pop("type" ) )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = strip_accents
lowerCAmelCase__ = tokenize_chinese_chars
lowerCAmelCase__ = normalizer_class(**__UpperCAmelCase )
lowerCAmelCase__ = do_lower_case
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]:
'''simple docstring'''
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]:
'''simple docstring'''
lowerCAmelCase__ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
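
# Numeric sketch of what build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences compute above, with placeholder ids
# (101 = [CLS], 102 = [SEP] in the standard BERT vocabularies).
cls_id, sep_id = 101, 102
seq_a, seq_b = [7, 8, 9], [10, 11]
input_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)
assert input_ids == [101, 7, 8, 9, 102, 10, 11, 102]
assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]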
| 340 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__ ( _UpperCAmelCase ):
a_ ="""megatron-bert"""
def __init__( self , __UpperCAmelCase=29056 , __UpperCAmelCase=1024 , __UpperCAmelCase=24 , __UpperCAmelCase=16 , __UpperCAmelCase=4096 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-1_2 , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , **__UpperCAmelCase , )-> Dict:
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = position_embedding_type
lowerCAmelCase__ = use_cache
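
# Usage sketch: the public name this configuration maps to is MegatronBertConfig;
# here it is instantiated with one override, reading back a default afterwards.
from transformers import MegatronBertConfig

config = MegatronBertConfig(num_hidden_layers=12)
print(config.hidden_size, config.num_hidden_layers)  # 1024 12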
| 340 |
a_ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
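
# Minimal end-to-end sketch of the Accelerator entry point exported above,
# using a toy model and dataset so the snippet runs on its own.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loader = DataLoader(TensorDataset(torch.randn(8, 4), torch.randn(8, 2)), batch_size=4)
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
for x, y in loader:
    loss = torch.nn.functional.mse_loss(model(x), y)
    accelerator.backward(loss)
    optimizer.step()
    optimizer.zero_grad()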
| 340 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class lowercase__ ( _UpperCAmelCase ):
a_ =42
a_ =None
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any]=0.999 , UpperCamelCase_ : Tuple="cosine" , ) -> Union[str, Any]:
"""simple docstring"""
if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
lowerCAmelCase__ = []
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = i / num_diffusion_timesteps
lowerCAmelCase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(UpperCamelCase_ ) / alpha_bar_fn(UpperCamelCase_ ) , UpperCamelCase_ ) )
return torch.tensor(UpperCamelCase_ , dtype=torch.floataa )
class lowercase__ ( _UpperCAmelCase, _UpperCAmelCase ):
@register_to_config
def __init__( self , __UpperCAmelCase = 1000 , __UpperCAmelCase = "fixed_small_log" , __UpperCAmelCase = True , __UpperCAmelCase = 1.0 , __UpperCAmelCase = "epsilon" , __UpperCAmelCase = "squaredcos_cap_v2" , )-> Tuple:
'''simple docstring'''
if beta_schedule != "squaredcos_cap_v2":
raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
lowerCAmelCase__ = betas_for_alpha_bar(__UpperCAmelCase )
lowerCAmelCase__ = 1.0 - self.betas
lowerCAmelCase__ = torch.cumprod(self.alphas , dim=0 )
lowerCAmelCase__ = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
lowerCAmelCase__ = 1.0
# setable values
lowerCAmelCase__ = None
lowerCAmelCase__ = torch.from_numpy(np.arange(0 , __UpperCAmelCase )[::-1].copy() )
lowerCAmelCase__ = variance_type
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> torch.FloatTensor:
'''simple docstring'''
return sample
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Any:
'''simple docstring'''
lowerCAmelCase__ = num_inference_steps
lowerCAmelCase__ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
lowerCAmelCase__ = (np.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa )
lowerCAmelCase__ = torch.from_numpy(__UpperCAmelCase ).to(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None )-> int:
'''simple docstring'''
if prev_timestep is None:
lowerCAmelCase__ = t - 1
lowerCAmelCase__ = self.alphas_cumprod[t]
lowerCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase__ = 1 - alpha_prod_t
lowerCAmelCase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase__ = self.betas[t]
else:
lowerCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase__ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowerCAmelCase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowerCAmelCase__ = torch.log(torch.clamp(__UpperCAmelCase , min=1E-2_0 ) )
lowerCAmelCase__ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowerCAmelCase__ = variance.log()
lowerCAmelCase__ = beta.log()
lowerCAmelCase__ = (predicted_variance + 1) / 2
lowerCAmelCase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase=None , __UpperCAmelCase = True , )-> Union[UnCLIPSchedulerOutput, Tuple]:
'''simple docstring'''
lowerCAmelCase__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowerCAmelCase__ , lowerCAmelCase__ = torch.split(__UpperCAmelCase , sample.shape[1] , dim=1 )
else:
lowerCAmelCase__ = None
# 1. compute alphas, betas
if prev_timestep is None:
lowerCAmelCase__ = t - 1
lowerCAmelCase__ = self.alphas_cumprod[t]
lowerCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowerCAmelCase__ = 1 - alpha_prod_t
lowerCAmelCase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowerCAmelCase__ = self.betas[t]
lowerCAmelCase__ = self.alphas[t]
else:
lowerCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev
lowerCAmelCase__ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase__ = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase__ = torch.clamp(
__UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowerCAmelCase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase__ = 0
if t > 0:
lowerCAmelCase__ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__UpperCAmelCase , device=model_output.device )
lowerCAmelCase__ = self._get_variance(
__UpperCAmelCase , predicted_variance=__UpperCAmelCase , prev_timestep=__UpperCAmelCase , )
if self.variance_type == "fixed_small_log":
lowerCAmelCase__ = variance
elif self.variance_type == "learned_range":
lowerCAmelCase__ = (0.5 * variance).exp()
else:
raise ValueError(
F"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
" for the UnCLIPScheduler." )
lowerCAmelCase__ = variance * variance_noise
lowerCAmelCase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__UpperCAmelCase , pred_original_sample=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )-> torch.FloatTensor:
'''simple docstring'''
lowerCAmelCase__ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
lowerCAmelCase__ = timesteps.to(original_samples.device )
lowerCAmelCase__ = alphas_cumprod[timesteps] ** 0.5
lowerCAmelCase__ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase__ = sqrt_alpha_prod.unsqueeze(-1 )
lowerCAmelCase__ = (1 - alphas_cumprod[timesteps]) ** 0.5
lowerCAmelCase__ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
lowerCAmelCase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
lowerCAmelCase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
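
# Numeric sketch, separate from the scheduler: the "squaredcos_cap_v2" betas
# built by betas_for_alpha_bar above, and the DDPM formula-(7) posterior-mean
# coefficients computed in `step`. All names are local to this sketch.
import math

num_train_timesteps = 1000

def alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

betas = [
    min(1 - alpha_bar((i + 1) / num_train_timesteps) / alpha_bar(i / num_train_timesteps), 0.999)
    for i in range(num_train_timesteps)
]
alphas = [1.0 - b for b in betas]
alphas_cumprod, prod = [], 1.0
for a in alphas:
    prod *= a
    alphas_cumprod.append(prod)

t = 500
coeff_x0 = alphas_cumprod[t - 1] ** 0.5 * betas[t] / (1 - alphas_cumprod[t])        # weight on predicted x_0
coeff_xt = alphas[t] ** 0.5 * (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t])  # weight on x_t
print(coeff_x0, coeff_xt)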
| 340 |
import collections
import importlib.util
import os
import re
from pathlib import Path
a_ = '''src/transformers'''
# Matches is_xxx_available()
a_ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
a_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-value pattern: "bla": ["foo", "bar"]
a_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
a_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
a_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
a_ = re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
a_ = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
a_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
a_ = re.compile(r'''^\s*try:''')
# Catches a line with else:
a_ = re.compile(r'''^\s*else:''')
def _a ( UpperCamelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if _re_test_backend.search(UpperCamelCase_ ) is None:
return None
lowerCAmelCase__ = [b[0] for b in _re_backend.findall(UpperCamelCase_ )]
backends.sort()
return "_and_".join(UpperCamelCase_ )
def _a ( UpperCamelCase_ : Optional[int] ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = 0
while line_index < len(UpperCamelCase_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase_ ):
lowerCAmelCase__ = _re_one_line_import_struct.search(UpperCamelCase_ ).groups()[0]
lowerCAmelCase__ = re.findall("\[([^\]]+)\]" , UpperCamelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowerCAmelCase__ = _re_import_struct_key_value.search(UpperCamelCase_ )
if single_line_import_search is not None:
lowerCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowerCAmelCase__ = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_import_struct_add_many.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_between_brackets.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_between_brackets.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_quote_object.search(UpperCamelCase_ ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ = []
while (
line_index < len(UpperCamelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
def find_duplicates(UpperCamelCase_ : str ):
return [k for k, v in collections.Counter(UpperCamelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ = []
for key in import_dict_objects.keys():
lowerCAmelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
lowerCAmelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ = "base imports" if key == "none" else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def _a ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = []
for root, _, files in os.walk(UpperCamelCase_ ):
if "__init__.py" in files:
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "__init__.py" )
lowerCAmelCase__ = parse_init(UpperCamelCase_ )
if objects is not None:
lowerCAmelCase__ = analyze_results(*UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase__ = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("\n".join(UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > 0:
raise ValueError("\n\n".join(UpperCamelCase_ ) )
def _a ( ) -> str:
"""simple docstring"""
lowerCAmelCase__ = []
for path, directories, files in os.walk(UpperCamelCase_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(UpperCamelCase_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCamelCase_ ) / folder).glob("*.py" ) ) ) == 0:
continue
lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / folder).relative_to(UpperCamelCase_ ) )
lowerCAmelCase__ = short_path.replace(os.path.sep , "." )
submodules.append(UpperCamelCase_ )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / fname).relative_to(UpperCamelCase_ ) )
lowerCAmelCase__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(UpperCamelCase_ )
return submodules
a_ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def _a ( ) -> int:
"""simple docstring"""
lowerCAmelCase__ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(UpperCamelCase_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase__ = spec.loader.load_module()
lowerCAmelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase__ = "\n".join(F"- {module}" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F"{list_of_modules}\n"
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
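
# Mini-demo of the backend-detection regexes defined at the top of this script,
# applied to a representative init line.
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

line = "    if not is_torch_available() and not is_vision_available():"
assert _re_test_backend.search(line) is not None
backends = sorted(b[0] for b in _re_backend.findall(line))
print("_and_".join(backends))  # torch_and_vision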
| 340 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
lowerCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
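
# End-to-end sketch of the processor under test, using the same checkpoint as
# the integration-style tests above; the image is synthetic so the snippet is
# self-contained (downloading the checkpoint needs network access).
import numpy as np
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.fromarray(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']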
| 340 |
from __future__ import annotations
import os
from collections.abc import Mapping
a_ = tuple[int, int]
class lowercase__ :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase )-> None:
'''simple docstring'''
lowerCAmelCase__ = vertices
lowerCAmelCase__ = {
(min(__UpperCAmelCase ), max(__UpperCAmelCase )): weight for edge, weight in edges.items()
}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> None:
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCAmelCase__ = weight
def UpperCAmelCase ( self )-> Graph:
'''simple docstring'''
lowerCAmelCase__ = Graph({min(self.vertices )} , {} )
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowerCAmelCase__ = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCAmelCase__ = edge
lowerCAmelCase__ = weight
subgraph.add_edge(__UpperCAmelCase , __UpperCAmelCase )
return subgraph
def _a ( UpperCamelCase_ : str = "p107_network.txt" ) -> int:
"""simple docstring"""
lowerCAmelCase__ = os.path.abspath(os.path.dirname(UpperCamelCase_ ) )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = {}
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
with open(UpperCamelCase_ ) as f:
lowerCAmelCase__ = f.read().strip().split("\n" )
lowerCAmelCase__ = [line.split("," ) for line in data]
for edgea in range(1 , len(UpperCamelCase_ ) ):
for edgea in range(UpperCamelCase_ ):
if adjaceny_matrix[edgea][edgea] != "-":
lowerCAmelCase__ = int(adjaceny_matrix[edgea][edgea] )
lowerCAmelCase__ = Graph(set(range(len(UpperCamelCase_ ) ) ) , UpperCamelCase_ )
lowerCAmelCase__ = graph.prims_algorithm()
lowerCAmelCase__ = sum(graph.edges.values() )
lowerCAmelCase__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowercase__ :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=[1, 2, 1] , __UpperCAmelCase=[2, 2, 4] , __UpperCAmelCase=2 , __UpperCAmelCase=2.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=8 , __UpperCAmelCase=["stage1", "stage2", "stage3"] , __UpperCAmelCase=[1, 2, 3] , )-> int:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = depths
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = window_size
lowerCAmelCase__ = mlp_ratio
lowerCAmelCase__ = qkv_bias
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = drop_path_rate
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = use_absolute_embeddings
lowerCAmelCase__ = patch_norm
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = is_training
lowerCAmelCase__ = scope
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = encoder_stride
lowerCAmelCase__ = out_features
lowerCAmelCase__ = out_indices
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = MaskFormerSwinModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCAmelCase )
lowerCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = MaskFormerSwinBackbone(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
        with self.parent.assertRaises(ValueError ):
lowerCAmelCase__ = ["stem"]
lowerCAmelCase__ = MaskFormerSwinBackbone(config=__UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase ):
a_ =(
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
a_ ={"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
a_ =False
a_ =False
a_ =False
a_ =False
a_ =False
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = MaskFormerSwinModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
return
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
pass
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
pass
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# Swin has a different seq_length
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = 3
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
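        # Round height and width up to multiples of the patch size so the hidden
        # states can be checked against padded inputs below.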
lowerCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
pass
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
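        # Outputs can contain NaN entries, which torch.allclose would never match;
        # zero them out so tuple and dict returns can be compared elementwise.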
def set_nan_tensor_to_zero(__UpperCAmelCase ):
lowerCAmelCase__ = 0
return t
def check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase={} ):
with torch.no_grad():
                lowerCAmelCase__ = model(**__UpperCAmelCase , return_dict=False , **__UpperCAmelCase )
                lowerCAmelCase__ = model(**__UpperCAmelCase , return_dict=True , **__UpperCAmelCase ).to_tuple()
def recursive_check(__UpperCAmelCase , __UpperCAmelCase ):
if isinstance(__UpperCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__UpperCAmelCase , __UpperCAmelCase ):
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
                elif isinstance(__UpperCAmelCase , Dict ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__UpperCAmelCase ) , set_nan_tensor_to_zero(__UpperCAmelCase ) , atol=1E-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(__UpperCAmelCase ).any()} and `inf`: {torch.isinf(__UpperCAmelCase )}. Dict has"
F" `nan`: {torch.isnan(__UpperCAmelCase ).any()} and `inf`: {torch.isinf(__UpperCAmelCase )}."
) , )
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
lowerCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"output_hidden_states": True} )
lowerCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
lowerCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"output_hidden_states": True} )
@require_torch
class lowercase__ ( unittest.TestCase, _UpperCAmelCase ):
a_ =(MaskFormerSwinBackbone,) if is_torch_available() else ()
a_ =MaskFormerSwinConfig
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = MaskFormerSwinModelTester(self )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
lowerCAmelCase__ = backbone_class(__UpperCAmelCase )
backbone.to(__UpperCAmelCase )
backbone.eval()
lowerCAmelCase__ = backbone(**__UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowerCAmelCase__ = backbone(**__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowerCAmelCase__ = backbone(**__UpperCAmelCase , output_attentions=__UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 340 |
from collections import defaultdict
from math import gcd
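# Count perimeters up to the limit formed by exactly one integer-sided right
# triangle: generate primitive Pythagorean triples with Euclid's formula
# (perimeter 2m(m + n) for coprime m > n of opposite parity) plus their multiples.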
def solution( limit : int = 1_500_000 ) -> int:
    """simple docstring"""
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _a ( ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=UpperCamelCase_ , default=UpperCamelCase_ , required=UpperCamelCase_ , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=UpperCamelCase_ , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=UpperCamelCase_ , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=UpperCamelCase_ , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=UpperCamelCase_ , default=0 , help="cuda_id." , )
lowerCAmelCase__ = parser.parse_args()
return args
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any ) -> Tuple:
"""simple docstring"""
if not len(UpperCamelCase_ ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
lowerCAmelCase__ , lowerCAmelCase__ = imgs[0].size
lowerCAmelCase__ = Image.new("RGB" , size=(cols * w, rows * h) )
lowerCAmelCase__ , lowerCAmelCase__ = grid.size
for i, img in enumerate(UpperCamelCase_ ):
grid.paste(UpperCamelCase_ , box=(i % cols * w, i // cols * h) )
return grid
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict="robotic cat with wings" , UpperCamelCase_ : Optional[int]=7.5 , UpperCamelCase_ : Any=50 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : List[Any]=42 , ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = torch.Generator(pipeline.device ).manual_seed(UpperCamelCase_ )
lowerCAmelCase__ = pipeline(
UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ , generator=UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ , ).images
lowerCAmelCase__ = int(math.sqrt(UpperCamelCase_ ) )
lowerCAmelCase__ = image_grid(UpperCamelCase_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
a_ = parse_args()
# Load models and create wrapper for stable diffusion
a_ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
a_ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
a_ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
a_ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
a_ = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
a_ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
a_ = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
a_ = unet.to(torch.device('''cuda''', args.cuda_id))
a_ = pipeline.to(unet.device)
a_, a_ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
a_ = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 340 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """tokenizer"""]
a_ ="""LayoutLMv2ImageProcessor"""
a_ =("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> Tuple:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , )-> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
lowerCAmelCase__ = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(__UpperCAmelCase , str ):
lowerCAmelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ = features["words"]
lowerCAmelCase__ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
# add pixel values
lowerCAmelCase__ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowerCAmelCase__ = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
lowerCAmelCase__ = images
return encoded_inputs
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
        images_with_overflow = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}" )
return images_with_overflow
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Dict:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
| 340 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
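# socket.socket and builtins.open are patched, so the server loop inside send_file
# runs end-to-end against mocks with no real network or filesystem access.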
@patch("socket.socket" )
@patch("builtins.open" )
def _a ( file : Mock , sock : Mock ) -> None:
    """simple docstring"""
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(f )
# ===== invoke =====
    send_file(filename="mytext.txt" , testing=True )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 340 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
lowerCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
        lowerCAmelCase__ = max([len(texts ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
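        # 49406 and 49407 are the CLIP tokenizer's <|startoftext|> / <|endoftext|> ids;
        # shorter prompts are right-padded with zeros to the max sequence length.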
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 340 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
@torch.no_grad()
def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 100 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , )-> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
lowerCAmelCase__ = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCAmelCase__ = audio_length_in_s * self.unet.config.sample_rate
lowerCAmelCase__ = 2 ** len(self.unet.up_blocks )
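        # Each up/down block halves or doubles the temporal resolution, so samples
        # must be long enough to survive 2**len(up_blocks) levels of downsampling.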
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
F" {3 * down_scale_factor / self.unet.config.sample_rate}." )
lowerCAmelCase__ = int(__UpperCAmelCase )
if sample_size % down_scale_factor != 0:
lowerCAmelCase__ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
F" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
" process." )
lowerCAmelCase__ = int(__UpperCAmelCase )
lowerCAmelCase__ = next(iter(self.unet.parameters() ) ).dtype
lowerCAmelCase__ = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(__UpperCAmelCase , list ) and len(__UpperCAmelCase ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(__UpperCAmelCase )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
lowerCAmelCase__ = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase )
# set step values
self.scheduler.set_timesteps(__UpperCAmelCase , device=audio.device )
lowerCAmelCase__ = self.scheduler.timesteps.to(__UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase__ = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCAmelCase__ = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
lowerCAmelCase__ = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowerCAmelCase__ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=__UpperCAmelCase )
| 340 |
from __future__ import annotations
from cmath import sqrt
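# Solve a*x^2 + b*x + c = 0 with the quadratic formula; cmath.sqrt keeps the result
# well-defined when the discriminant is negative, yielding complex roots.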
def quadratic_roots( a : int , b : int , c : int ) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero." )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main() -> None:
    """simple docstring"""
    solution_1 , solution_2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(F"The solutions are: {solution_1} and {solution_2}" )
if __name__ == "__main__":
main()
| 340 | 1 |
import re
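# Return the complementary DNA strand (A<->T, C<->G) after validating that the
# input contains only the bases A, T, C and G.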
def _a ( dna : str ) -> str:
    """simple docstring"""
    if len(re.findall("[ATCG]" , dna ) ) != len(dna ):
        raise ValueError("Invalid Strand" )
    return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
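# Quantum Fourier transform on n qubits: a Hadamard on each qubit followed by
# controlled phase rotations of pi / 2**k, then swaps to undo the bit-reversed
# qubit ordering the construction leaves behind.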
def quantum_fourier_transform( number_of_qubits : int = 3 ) -> qiskit.result.counts.Counts:
    """simple docstring"""
    if isinstance(number_of_qubits , str ):
        raise TypeError("number of qubits must be an integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10)." )
    qr = QuantumRegister(number_of_qubits , "qr" )
    cr = ClassicalRegister(number_of_qubits , "cr" )
    quantum_circuit = QuantumCircuit(qr , cr )
    counter = number_of_qubits
    for i in range(number_of_qubits ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 340 | 1 |
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=_UpperCAmelCase ):
a_ =["""torch""", """torchsde"""]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase )-> int:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
| 340 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
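        # Greedy decoding: keep the top-1 token per position, drop the leading
        # start-of-sequence slot, then cut each string at its end-of-sequence token.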
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
| 340 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _a ( UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : PreTrainedTokenizer , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] = None , ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = {}
if train_file is not None:
lowerCAmelCase__ = [train_file]
if eval_file is not None:
lowerCAmelCase__ = [eval_file]
if test_file is not None:
lowerCAmelCase__ = [test_file]
lowerCAmelCase__ = datasets.load_dataset("csv" , data_files=UpperCamelCase_ )
lowerCAmelCase__ = list(ds[list(files.keys() )[0]].features.keys() )
lowerCAmelCase__ = features_name.pop(UpperCamelCase_ )
lowerCAmelCase__ = list(set(ds[list(files.keys() )[0]][label_name] ) )
lowerCAmelCase__ = {label: i for i, label in enumerate(UpperCamelCase_ )}
lowerCAmelCase__ = tokenizer.model_input_names
lowerCAmelCase__ = {}
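    # Tokenize single sentences or sentence pairs, depending on how many feature
    # columns remain after the label column is removed.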
if len(UpperCamelCase_ ) == 1:
for k in files.keys():
lowerCAmelCase__ = ds[k].map(
lambda UpperCamelCase_ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , padding="max_length" ) , batched=UpperCamelCase_ , )
elif len(UpperCamelCase_ ) == 2:
for k in files.keys():
lowerCAmelCase__ = ds[k].map(
lambda UpperCamelCase_ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , padding="max_length" , ) , batched=UpperCamelCase_ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
lowerCAmelCase__ = {k: v for k, v in ex.items() if k in input_names}
lowerCAmelCase__ = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
lowerCAmelCase__ = {k: v for k, v in ex.items() if k in input_names}
lowerCAmelCase__ = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
lowerCAmelCase__ = {k: v for k, v in ex.items() if k in input_names}
lowerCAmelCase__ = labelaid[ex[label_name]]
yield (d, label)
lowerCAmelCase__ = (
tf.data.Dataset.from_generator(
UpperCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
lowerCAmelCase__ = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
lowerCAmelCase__ = (
tf.data.Dataset.from_generator(
UpperCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
lowerCAmelCase__ = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
lowerCAmelCase__ = (
tf.data.Dataset.from_generator(
UpperCamelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
lowerCAmelCase__ = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
a_ = logging.getLogger(__name__)
@dataclass
class lowercase__ :
a_ =field(metadata={"""help""": """Which column contains the label"""} )
a_ =field(default=_UpperCAmelCase, metadata={"""help""": """The path of the training file"""} )
a_ =field(default=_UpperCAmelCase, metadata={"""help""": """The path of the development file"""} )
a_ =field(default=_UpperCAmelCase, metadata={"""help""": """The path of the test file"""} )
a_ =field(
default=128, metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
}, )
a_ =field(
default=_UpperCAmelCase, metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class lowercase__ :
a_ =field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a_ =field(
default=_UpperCAmelCase, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a_ =field(
default=_UpperCAmelCase, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a_ =field(default=_UpperCAmelCase, metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ =field(
default=_UpperCAmelCase, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""}, )
def _a ( ) -> str:
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase_ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
lowerCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase_ ) , labelaid=UpperCamelCase_ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
lowerCAmelCase__ = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase_ : EvalPrediction ) -> Dict:
lowerCAmelCase__ = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
lowerCAmelCase__ = TFTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , compute_metrics=UpperCamelCase_ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase__ = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCAmelCase__ = trainer.evaluate()
lowerCAmelCase__ = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(UpperCamelCase_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
results.update(UpperCamelCase_ )
return results
if __name__ == "__main__":
main()
| 340 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a_ = '''__DUMMY_TRANSFORMERS_USER__'''
a_ = '''Dummy User'''
a_ = '''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'''
a_ = '''https://hub-ci.huggingface.co'''
a_ = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
a_ = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
a_ = Path('''~/.huggingface/hub_ci_token''').expanduser()
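# The fixtures below point datasets and huggingface_hub at the hub CI endpoint and
# manage throwaway dataset repos owned by the dummy CI user.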
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config(monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr("datasets.config.HF_ENDPOINT" , CI_HUB_ENDPOINT )
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , CI_HUB_DATASETS_URL )
@pytest.fixture
def ci_hub_token_path(monkeypatch ):
    """simple docstring"""
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config , ci_hub_token_path ):
    """simple docstring"""
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session" )
def hf_api():
    """simple docstring"""
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope="session" )
def hf_token(hf_api: HfApi ):
    """simple docstring"""
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo(hf_api ):
    """simple docstring"""
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type="dataset" )
    return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo ):
    """simple docstring"""
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
@pytest.fixture(scope="session" )
def hf_private_dataset_repo_txt_data_(hf_api: HfApi , hf_token , text_file ):
    """simple docstring"""
    repo_name = F"repo_txt_data-{int(time.time() * 1_0e3 )}"
    repo_id = F"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id , token=hf_token , repo_type="dataset" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo="data/text_data.txt" , repo_id=repo_id , repo_type="dataset" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="dataset" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    """simple docstring"""
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi , hf_token , zip_file ):
    """simple docstring"""
    repo_name = F"repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"
    repo_id = F"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id , token=hf_token , repo_type="dataset" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_file ) , path_in_repo="data.zip" , repo_id=repo_id , repo_type="dataset" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="dataset" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    """simple docstring"""
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi , hf_token , zip_image_path ):
    """simple docstring"""
    repo_name = F"repo_zipped_img_data-{int(time.time() * 1_0e3 )}"
    repo_id = F"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id , token=hf_token , repo_type="dataset" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo="data.zip" , repo_id=repo_id , repo_type="dataset" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="dataset" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url ):
    """simple docstring"""
    return hf_private_dataset_repo_zipped_img_data_
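# A test requests the public fixture (not the trailing-underscore session fixture) so that the
# endpoint monkeypatches are applied, e.g. (sketch):
#   def test_load_private_text(hf_private_dataset_repo_txt_data, hf_token): ...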
| 340 |
from collections import defaultdict
def dfs(start: int ) -> int:
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree() -> None:
    """simple docstring"""
    dfs(1 )
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
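    # For the sample tree above, the DFS marks the subtrees rooted at 3, 6 and the root
    # itself as even-sized, so len(cuts) - 1 == 2 edges can be removed.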
| 340 | 1 |
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp( self )-> None:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer )-> tuple:
        '''simple docstring'''
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id( self )-> None:
        '''simple docstring'''
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self )-> None:
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 2000 )
    def test_vocab_size( self )-> None:
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
    def test_full_tokenizer( self )-> None:
        '''simple docstring'''
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [465, 287, 265, 631, 842] )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        # fmt: off
        self.assertListEqual(
            tokens , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        # fmt: off
        self.assertListEqual(
            back_tokens , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on
# fmt: on
    def test_fast_encode_decode( self )-> None:
        '''simple docstring'''
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB )
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts , expected_ids_list ):
            self.assertListEqual(tokenizer.encode_fast(text ) , expected_ids )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts , expected_ids_list ):
            self.assertEqual(tokenizer.decode_fast(token_ids ) , text )
@slow
    def test_tokenizer_integration( self )-> None:
        '''simple docstring'''
        sequences = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="AI-Sweden/gpt-sw3-126m" , sequences=sequences , )
| 340 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL" ) -> str:
    """simple docstring"""
    url = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
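    # Note: this class string is tied to Yahoo Finance's current page markup and will
    # break if the site changes its layout.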
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 1 |
import torch
def main() -> None:
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"Successfully ran on {num_gpus} GPUs" )
if __name__ == "__main__":
main()
| 340 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__( self , ints: Iterable[int] )-> None:
        '''simple docstring'''
        self.head: Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
def __iter__( self )-> Iterator[int]:
'''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next_node
def __len__( self )-> int:
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self )-> str:
'''simple docstring'''
return " -> ".join([str(__UpperCAmelCase ) for node in self] )
def merge_lists(sll_one: SortedLinkedList , sll_two: SortedLinkedList ) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
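    # Expected output (ascending merge of both tuples):
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10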
| 340 | 1 |
def sum_digits(num: int ) -> int:
    """simple docstring"""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100 ) -> int:
    """simple docstring"""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F"{solution() = }")
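    # Project Euler 65: the digit sum of the numerator of the 100th convergent
    # of e is 272, which is what the default call prints.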
| 340 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = '''hopper-medium-v2'''
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 1 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features ) -> Optional[int]:
    """simple docstring"""
    batch_size = np.inf
    def set_batch_size(feature: FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
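# For example, a dataset whose features include an Image or Audio column picks the
# corresponding (smaller) row-group size from `datasets.config`; datasets with only
# plain features return None, letting the writer fall back to its default batch size.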
class ParquetDatasetReader(AbstractDatasetReader ):
    def __init__( self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , num_proc = None , **kwargs , )-> None:
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self ):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    def __init__( self , dataset , path_or_buf , batch_size = None , **parquet_writer_kwargs , )-> None:
        '''simple docstring'''
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self )-> int:
        '''simple docstring'''
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , "wb+" ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self , file_obj , batch_size , **parquet_writer_kwargs )-> int:
        '''simple docstring'''
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf" , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
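# Minimal usage sketch (file name illustrative):
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ParquetDatasetWriter(ds, "data.parquet").write()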
| 340 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = '''src/transformers'''
PATH_TO_TASK_GUIDES = '''docs/source/en/tasks'''
def _find_text_in_file(filename , start_prompt , end_prompt ) -> tuple:
    """simple docstring"""
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide ) -> str:
    """simple docstring"""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task(task_guide , overwrite=False ) -> None:
    """simple docstring"""
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 1 |
import math
def is_prime(number: int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(nth: int = 10_001 ) -> int:
    """simple docstring"""
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int." ) from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one." )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(F"{solution() = }")
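    # The 10001st prime is 104743, so the default call prints `solution() = 104743`.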
| 340 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name , checkpoint_name , dump_path , force_download ) -> None:
    """simple docstring"""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + "Fast" )}
    logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split("/" )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"=> File names {file_names}" )
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json" ):
                    os.remove(file_name )
                    logger.info(F"=> removing {file_name}" )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 340 | 1 |
import argparse
CUSTOM_JS_FILE = '''docs/source/_static/js/custom.js'''
def update_custom_js(version ) -> None:
    """simple docstring"""
    with open(CUSTOM_JS_FILE , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion =" ):
        index += 1
    lines[index] = F"const stableVersion = \"v{version}\"\n"
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {" ):
        index += 1
    # We go until the end
    while not lines[index].startswith("}" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F"    \"v{version}\": \"v{version}\",\n"
    with open(CUSTOM_JS_FILE , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.writelines(lines )
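# The script expects custom.js to contain a line starting with `const stableVersion =`,
# followed later by a `const versionMapping = {` block closed by a line starting with `}`;
# the new version entry is appended just before that closing brace.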
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
    args = parser.parse_args()
update_custom_js(args.version)
| 340 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name , data_dir , max_source_length=1_024 , max_target_length=1_024 , consider_target=False , **kwargs ) -> None:
    """simple docstring"""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 340 | 1 |
def solution(numerator: int = 1 , digit: int = 1_000 ) -> int:
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
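    # Project Euler 26: for denominators below 1000, d = 983 yields the longest
    # recurring cycle in the decimal expansion of 1/d, so solution() returns 983.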
| 340 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig ):
    model_type = """xlnet"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-1_2 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , )-> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead." , FutureWarning , )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self )-> int:
        '''simple docstring'''
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value )-> None:
        '''simple docstring'''
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 340 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus ):
@slow
@require_torch
    def test_finetune_bert2bert( self )-> None:
'''simple docstring'''
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config , base_model=False ) -> list:
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ) -> None:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ) -> None:
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict ) -> None:
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ) -> None:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 340 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir ) -> None:
    """simple docstring"""
    lock1 = FileLock(str(tmpdir / "foo.lock" ) )
    lock2 = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_lock_filename(tmpdir ) -> None:
    """simple docstring"""
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith(".lock" )
    assert not lock1._lock_file.endswith(filename )
    assert len(os.path.basename(lock1._lock_file ) ) <= 255
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
| 340 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline(Pipeline ):
def __init__( self , **__UpperCAmelCase )-> List[str]:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self , __UpperCAmelCase , **__UpperCAmelCase )-> int:
'''simple docstring'''
return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
    def _sanitize_parameters( self , **kwargs )-> tuple:
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess( self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." )-> dict:
        '''simple docstring'''
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward( self , model_inputs )-> dict:
        '''simple docstring'''
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self , model_outputs )-> list:
        '''simple docstring'''
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}" )
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
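# Typical usage through the pipeline factory (model name and image path illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cats.png", candidate_labels=["cat", "dog"])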
| 340 | 1 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject ):
    _backends = ["note_seq"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
requires_backends(self , ["note_seq"] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase )-> Dict:
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
@classmethod
def UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase )-> Tuple:
'''simple docstring'''
requires_backends(cls , ["note_seq"] )
| 340 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class BartphoTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self )-> None:
        '''simple docstring'''
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(F"{token} {vocab_tokens[token]}\n" )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs )-> BartphoTokenizer:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer )-> tuple:
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer( self )-> None:
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 340 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase__ ( unittest.TestCase ):
    def setUp( self )-> None:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
    def get_image_processor( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
    def get_qformer_tokenizer( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
    def tearDown( self )-> None:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self )-> list:
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self )-> None:
        '''simple docstring'''
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , PreTrainedTokenizerFast )
    def test_image_processor( self )-> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self )-> None:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tokens = tokenizer(input_str , return_token_type_ids=False )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
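# End-to-end sketch of what the tests above exercise (hedged: the checkpoint
# name is only illustrative and `image` is a placeholder PIL image):
#
#     processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#     inputs = processor(images=image, text="Describe the image", return_tensors="pt")
#     # -> input_ids / attention_mask               (language tokenizer)
#     #    qformer_input_ids / qformer_attention_mask  (Q-Former tokenizer)
#     #    pixel_values                              (BLIP image processor)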
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , )-> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None )-> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
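# The special-token layout implemented above, as a hedged sketch (assuming the
# class is exported as DistilBertTokenizerFast; 7592/2088 are the
# bert-base-uncased vocabulary ids for "hello"/"world"):
#
#     tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     tok.build_inputs_with_special_tokens([7592], [2088])
#     # -> [101, 7592, 102, 2088, 102]   i.e. [CLS] A [SEP] B [SEP]
#     tok.create_token_type_ids_from_sequences([7592], [2088])
#     # -> [0, 0, 0, 1, 1]   (segment ids; DistilBERT itself ignores them)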
speed_chart = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.609344,
"knot": 1.852,
}
speed_chart_inverse = {
"km/h": 1.0,
"m/s": 0.277777778,
"mph": 0.621371192,
"knot": 0.539956803,
}
def convert_speed ( speed : float , unit_from : str , unit_to : str ) -> float:
    """simple docstring"""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            F"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            F"Valid values are: {', '.join(speed_chart )}"
        )
        raise ValueError(msg )
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
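    # Spot checks of the converter above (results rounded to 3 decimals):
    print(convert_speed(100, "km/h", "m/s"))  # 27.778
    print(convert_speed(100, "km/h", "mph"))  # 62.137
    print(convert_speed(100, "mph", "km/h"))  # 160.934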
a_ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
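# Minimal sketch of the OOM-retry helper exported above (hedged: assumes a CUDA
# training loop; `train_one_epoch` is a placeholder for user code):
#
#     from accelerate import Accelerator, find_executable_batch_size
#
#     accelerator = Accelerator()
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         # re-invoked with a halved batch_size whenever CUDA memory runs out
#         train_one_epoch(batch_size)
#
#     train()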
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class lowercase__ ( TrainingArguments ):
    sortish_sampler: bool = field(default=False, metadata={"""help""": """Whether to use SortishSampler or not."""} )
    predict_with_generate: bool = field(
        default=False, metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `max_length` value of the model configuration."""
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            """help""": (
                """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `num_beams` value of the model configuration."""
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
        }, )
    def UpperCAmelCase ( self )-> Tuple:
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
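# Hedged usage sketch (assuming the class above is exported as
# Seq2SeqTrainingArguments; `output_dir` comes from the TrainingArguments base):
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )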
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Matches is_xxx_available()
_re_backend = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_re_try = re.compile(r'''^\s*try:''')
# Catches a line with else:
_re_else = re.compile(r'''^\s*else:''')
def find_backend ( line ):
    """simple docstring"""
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init ( init_file ):
    """simple docstring"""
    with open(init_file , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
# First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]" , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(", " )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {"none": objects}
# Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results ( import_dict_objects , type_hint_objects ):
"""simple docstring"""
def find_duplicates(UpperCamelCase_ : str ):
return [k for k, v in collections.Counter(UpperCamelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = "base imports" if key == "none" else F"{key} backend"
            errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def check_all_inits ( ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , "__init__.py" )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors ) )
    if len(failures ) > 0:
        raise ValueError("\n\n".join(failures ) )
def get_transformers_submodules ( ):
    """simple docstring"""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , "." )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            if len(submodule.split("." ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def check_submodules ( ):
    """simple docstring"""
    spec = importlib.util.spec_from_file_location(
        "transformers" , os.path.join(PATH_TO_TRANSFORMERS , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = "\n".join(F"- {module}" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            F"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
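    # Quick illustration of the backend-detection helper defined above
    # (hedged: the expected outputs in comments follow from the regexes,
    # they are not a recorded run):
    print(find_backend("if not is_torch_available():"))  # -> "torch"
    print(find_backend("import os"))                     # -> None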
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 25_6047
RO_CODE = 25_6145
@require_sentencepiece
@require_tokenizers
class lowercase__ ( TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
lowerCAmelCase__ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) )
shutil.rmtree(__UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase )
lowerCAmelCase__ = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) )
shutil.rmtree(__UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase )
lowerCAmelCase__ = tokenizer_p.save_pretrained(__UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase__ = tokenizer_r.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer_p.from_pretrained(__UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) )
shutil.rmtree(__UpperCAmelCase )
@require_torch
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
        if not self.test_seq2seq:
return
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
                src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
                tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text , tgt_texts=tgt_text , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text , tgt_texts=tgt_text , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , __UpperCAmelCase )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase__ = [AddedToken("<special>" , lstrip=__UpperCAmelCase )]
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ = tokenizer_r.encode("Hey this is a <special> token" )
lowerCAmelCase__ = tokenizer_r.encode("<special>" , add_special_tokens=__UpperCAmelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(
__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ = self.tokenizer_class.from_pretrained(
__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ = tokenizer_p.encode("Hey this is a <special> token" )
lowerCAmelCase__ = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
a_ ="""facebook/nllb-200-distilled-600M"""
a_ =[
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" )
        cls.pad_token_id = 1
return cls
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 256001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 256002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 256057 )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids )
# fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
# fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_romanian )
        self.assertNotIn(self.tokenizer.eos_token , result )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __UpperCAmelCase )
lowerCAmelCase__ = 10
lowerCAmelCase__ = self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [256203, 3] )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = NllbTokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens )
@require_torch
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 15) , batch.input_ids.shape )
        self.assertEqual((2, 15) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(RO_CODE , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
        self.assertEqual(
            nested_simplify(inputs ) , {
# A, test, EOS, en_XX
"input_ids": [[256047, 70, 7356, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 256057,
} , )
@require_torch
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
        self.assertEqual(
            inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] )
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
        self.assertEqual(
            inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
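# Hedged sketch of the translation-pair encoding the tests above verify:
#
#     tok = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
#     )
#     batch = tok(src_text, text_target=tgt_text, return_tensors="pt")
#     # legacy behaviour appends the language code after EOS ([..., 2, 256047]);
#     # with tok.legacy_behaviour = False it is prepended instead ([256047, ...]).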
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    def __init__( self , vertices , edges )-> None:
        '''simple docstring'''
        self.vertices = vertices
        self.edges = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight )-> None:
        '''simple docstring'''
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self )-> Graph:
        '''simple docstring'''
        subgraph = Graph({min(self.vertices )} , {} )
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution ( filename : str = "p107_network.txt" ) -> int:
    """simple docstring"""
    script_directory = os.path.abspath(os.path.dirname(__file__ ) )
    network_file_path = os.path.join(script_directory , filename )
    edges = {}
    with open(network_file_path ) as f:
        data = f.read().strip().split("\n" )
    adjacency_matrix = [line.split("," ) for line in data]
    for edge_b in range(1 , len(adjacency_matrix ) ):
        for edge_a in range(edge_b ):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a] )
    graph = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys ( config , base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_ ( state_dict ):
    """simple docstring"""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head ( state_dict ):
    """simple docstring"""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint ( checkpoint_url , pytorch_dump_folder_path ):
"""simple docstring"""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
lowerCAmelCase__ = 384
lowerCAmelCase__ = 1_536
lowerCAmelCase__ = 6
elif "l16" in checkpoint_url:
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase__ = 4
elif "l7" in checkpoint_url:
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
model.eval()
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
lowerCAmelCase__ = ViTImageProcessor(
size=config.image_size , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ )
lowerCAmelCase__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
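# Example invocation (the script name and output path below are illustrative):
#
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small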
from collections import defaultdict
from math import gcd
def solution ( limit : int = 1_500_000 ) -> int:
    """simple docstring"""
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n )
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
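# Because of the _LazyModule indirection above, the import below resolves
# without pulling in torch or flax; the heavy modeling modules only load on
# first attribute access:
#
#     from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderConfig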
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """LayoutLMv2ImageProcessor"""
    tokenizer_class = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs )-> None:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , )-> BatchEncoding:
        '''simple docstring'''
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text] # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping )-> List[str]:
        '''simple docstring'''
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs )-> Union[str, Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs )-> Dict:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]
@property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
return self.image_processor_class
@property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
return self.image_processor
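# Hedged usage sketch of the OCR path above (assuming the class is exported as
# LayoutXLMProcessor; the checkpoint name is illustrative and `image` is a
# placeholder document image):
#
#     from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizerFast
#     processor = LayoutXLMProcessor(
#         LayoutLMv2ImageProcessor(),  # apply_ocr=True by default
#         LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base"),
#     )
#     encoding = processor(image, return_tensors="pt")
#     # -> input_ids, bbox, attention_mask, image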
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class lowercase__ ( TestCase ):
    def setUp( self ):
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
# Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
lowerCAmelCase__ = os.path.join(__UpperCAmelCase , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowerCAmelCase__ = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
    def get_tokenizer( self )-> RealmTokenizer:
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
    def tearDown( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
    def get_config( self ):
        '''simple docstring'''
        config = RealmConfig(num_block_records=self.num_block_records )
        return config
    def get_dummy_dataset( self ):
        '''simple docstring'''
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
    def get_dummy_block_records( self ):
        '''simple docstring'''
        block_records = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
            ] , dtype=object , )
return block_records
    def get_dummy_retriever( self ):
        '''simple docstring'''
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , _ = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
lowerCAmelCase__ = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
lowerCAmelCase__ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 340 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
lowerCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
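        # build one random channels-first uint8 image and convert it to a channels-last PIL image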
        lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
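        # image-guided detection: the processor takes query images in place of text queries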
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 340 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase__ :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=2 , )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = scope
lowerCAmelCase__ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ = (image_size // patch_size) ** 2
lowerCAmelCase__ = num_patches + 2
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = TFDeiTModel(config=__UpperCAmelCase )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=__UpperCAmelCase )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForMaskedImageModeling(__UpperCAmelCase )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = TFDeiTForImageClassification(__UpperCAmelCase )
lowerCAmelCase__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = TFDeiTForImageClassification(__UpperCAmelCase )
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase ):
a_ =(
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
a_ =(
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
a_ =False
a_ =False
a_ =False
a_ =False
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = TFDeiTModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , tf.keras.layers.Dense ) )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
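        # drop the labels for model classes whose call signature does not accept them (e.g. the with-teacher variant)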
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDeiTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=__UpperCAmelCase , return_tensors="tf" )
# forward pass
lowerCAmelCase__ = model(**__UpperCAmelCase )
# verify the logits
lowerCAmelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
lowerCAmelCase__ = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 340 |
from __future__ import annotations
from cmath import sqrt
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowerCAmelCase__ = b * b - 4 * a * c
lowerCAmelCase__ = (-b + sqrt(UpperCamelCase_ )) / (2 * a)
lowerCAmelCase__ = (-b - sqrt(UpperCamelCase_ )) / (2 * a)
    return (
        root_a.real if not root_a.imag else root_a,
        root_b.real if not root_b.imag else root_b,
    )
def _a ( ) -> Optional[Any]:
"""simple docstring"""
    lowerCAmelCase__ , lowerCAmelCase__ = quadratic_roots(a=5 , b=6 , c=1 )
    print(F"The solutions are: {solution_a} and {solution_b}" )
if __name__ == "__main__":
main()
| 340 | 1 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
a_ = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
a_ = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = {
"word_embeddings.weight": "word_embeddings.weight",
"word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
"word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
"weight": "ln_f.weight",
"bias": "ln_f.bias",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
lowerCAmelCase__ = int(re.match(R".*layer_(\d*).*" , UpperCamelCase_ )[1] )
layer_number -= 3
return F"h.{layer_number}." + key
def _a ( UpperCamelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
lowerCAmelCase__ = re.search(R"[^\d](\d+)$" , str(UpperCamelCase_ ) )
if bit_search is None:
raise ValueError(F"`dtype` is not a valid dtype: {dtype}." )
lowerCAmelCase__ = int(bit_search.groups()[0] )
return bit_size // 8
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int ) -> int:
"""simple docstring"""
if bloom_config_file == "":
lowerCAmelCase__ = BloomConfig()
else:
lowerCAmelCase__ = BloomConfig.from_json_file(UpperCamelCase_ )
if shard_model:
lowerCAmelCase__ = os.listdir(UpperCamelCase_ )
lowerCAmelCase__ = sorted(filter(lambda UpperCamelCase_ : s.startswith("layer" ) and "model_00" in s , UpperCamelCase_ ) )
lowerCAmelCase__ = {"weight_map": {}, "metadata": {}}
lowerCAmelCase__ = 0
lowerCAmelCase__ = None
lowerCAmelCase__ = BloomConfig()
for j, file in enumerate(UpperCamelCase_ ):
print("Processing file: {}".format(UpperCamelCase_ ) )
lowerCAmelCase__ = None
for i in range(UpperCamelCase_ ):
# load all TP files
lowerCAmelCase__ = file.replace("model_00" , F"model_0{i}" )
lowerCAmelCase__ = torch.load(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , map_location="cpu" )
# Rename keys in the transformers names
lowerCAmelCase__ = list(temp.keys() )
for key in keys:
lowerCAmelCase__ = temp.pop(UpperCamelCase_ )
if tensors is None:
lowerCAmelCase__ = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
lowerCAmelCase__ = torch.cat([tensors[key], temp[key]] , dim=UpperCamelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase__ = tensors[key] / pretraining_tp
torch.save(
UpperCamelCase_ , os.path.join(
UpperCamelCase_ , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
lowerCAmelCase__ = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
lowerCAmelCase__ = "pytorch_model_{}-of-{}.bin".format(
str(j + 1 ).zfill(5 ) , str(len(UpperCamelCase_ ) ).zfill(5 ) )
lowerCAmelCase__ = BloomConfig()
lowerCAmelCase__ = pytorch_dump_folder_path + "/" + CONFIG_NAME
lowerCAmelCase__ = total_size
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCamelCase_ , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f:
lowerCAmelCase__ = json.dumps(UpperCamelCase_ , indent=2 , sort_keys=UpperCamelCase_ ) + "\n"
f.write(UpperCamelCase_ )
else:
lowerCAmelCase__ = BloomModel(UpperCamelCase_ )
lowerCAmelCase__ = os.listdir(UpperCamelCase_ )
lowerCAmelCase__ = sorted(filter(lambda UpperCamelCase_ : s.startswith("layer" ) and "model_00" in s , UpperCamelCase_ ) )
lowerCAmelCase__ = None
for i, file in enumerate(UpperCamelCase_ ):
lowerCAmelCase__ = None
for i in range(UpperCamelCase_ ):
# load all TP files
lowerCAmelCase__ = file.replace("model_00" , F"model_0{i}" )
lowerCAmelCase__ = torch.load(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , map_location="cpu" )
# Rename keys in the transformers names
lowerCAmelCase__ = list(temp.keys() )
for key in keys:
lowerCAmelCase__ = temp.pop(UpperCamelCase_ )
if tensors is None:
lowerCAmelCase__ = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
lowerCAmelCase__ = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
lowerCAmelCase__ = torch.cat([tensors[key], temp[key]] , dim=UpperCamelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCamelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
lowerCAmelCase__ = tensors[key] / pretraining_tp
lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert not other_keys.unexpected_keys, F"The keys {other_keys.unexpected_keys} are unexpected"
if missing_keys is None:
lowerCAmelCase__ = set(other_keys.missing_keys )
else:
lowerCAmelCase__ = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"The keys {missing_keys} are missing"
# Save pytorch-model
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
lowerCAmelCase__ = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
lowerCAmelCase__ = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
if config.torch_dtype is not None:
lowerCAmelCase__ = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCamelCase_ )
print(F"Save configuration file to {pytorch_config_dump_path}" )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
a_ = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 340 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _a ( UpperCamelCase_ : int = 3 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
    if isinstance(UpperCamelCase_ , str ):
        raise TypeError("number of qubits must be an integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(UpperCamelCase_ ) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10)." )
lowerCAmelCase__ = QuantumRegister(UpperCamelCase_ , "qr" )
lowerCAmelCase__ = ClassicalRegister(UpperCamelCase_ , "cr" )
lowerCAmelCase__ = QuantumCircuit(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = number_of_qubits
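    # apply a Hadamard to each qubit followed by controlled-phase rotations, then swap qubits to reverse their order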
for i in range(UpperCamelCase_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(UpperCamelCase_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , UpperCamelCase_ , UpperCamelCase_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(UpperCamelCase_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(UpperCamelCase_ , UpperCamelCase_ )
# simulate with 10000 shots
lowerCAmelCase__ = Aer.get_backend("qasm_simulator" )
lowerCAmelCase__ = execute(UpperCamelCase_ , UpperCamelCase_ , shots=10_000 )
return job.result().get_counts(UpperCamelCase_ )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 340 | 1 |
def _a ( UpperCamelCase_ : list , UpperCamelCase_ : list , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> int:
"""simple docstring"""
if index == number_of_items:
return 0
lowerCAmelCase__ = 0
lowerCAmelCase__ = 0
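    # option 1 (below): skip the current item; option 2: include it when it fits and recurse on the reduced capacity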
lowerCAmelCase__ = knapsack(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase__ = values[index] + knapsack(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , max_weight - weights[index] , index + 1 )
return max(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class lowercase__ ( _UpperCAmelCase ):
a_ ="""char"""
a_ ="""bpe"""
a_ ="""wp"""
a_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
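        # decode with the character, BPE and wordpiece heads, then keep the highest-scoring string per example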
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
| 340 | 1 |
# Imports
import numpy as np
class lowercase__ :
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None )-> Tuple:
'''simple docstring'''
self.set_matricies(red=__UpperCAmelCase , green=__UpperCAmelCase , blue=__UpperCAmelCase , red_edge=__UpperCAmelCase , nir=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None )-> List[Any]:
'''simple docstring'''
if red is not None:
lowerCAmelCase__ = red
if green is not None:
lowerCAmelCase__ = green
if blue is not None:
lowerCAmelCase__ = blue
if red_edge is not None:
lowerCAmelCase__ = red_edge
if nir is not None:
lowerCAmelCase__ = nir
return True
def UpperCAmelCase ( self , __UpperCAmelCase="" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None )-> str:
'''simple docstring'''
self.set_matricies(red=__UpperCAmelCase , green=__UpperCAmelCase , blue=__UpperCAmelCase , red_edge=__UpperCAmelCase , nir=__UpperCAmelCase )
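        # dispatch table mapping an index name to the method that computes it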
lowerCAmelCase__ = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self , __UpperCAmelCase=0.08 , __UpperCAmelCase=1.22 , __UpperCAmelCase=0.03 )-> List[str]:
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
return (self.nir / self.green) - 1
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
return self.nir - self.green
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def UpperCAmelCase ( self , __UpperCAmelCase=0.16 )-> Optional[int]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self , __UpperCAmelCase=0.5 )-> Any:
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None )-> Union[str, Any]:
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCAmelCase__ = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
return self.nir / self.red
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 340 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 340 |
from collections import defaultdict
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
for v in tree[start]:
if v not in visited:
ret += dfs(UpperCamelCase_ )
if ret % 2 == 0:
cuts.append(UpperCamelCase_ )
return ret
def _a ( ) -> Optional[Any]:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
a_, a_ = 10, 9
a_ = defaultdict(list)
a_ = {}
a_ = []
a_ = 0
a_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 340 | 1 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def _a ( UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}." )
if tokenizer_name is None:
lowerCAmelCase__ = TOKENIZER_CLASSES
else:
lowerCAmelCase__ = {tokenizer_name: getattr(UpperCamelCase_ , tokenizer_name + "Fast" )}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
lowerCAmelCase__ = TOKENIZER_CLASSES[tokenizer_name]
lowerCAmelCase__ = True
if checkpoint_name is None:
lowerCAmelCase__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCAmelCase__ = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
lowerCAmelCase__ = tokenizer_class.from_pretrained(UpperCamelCase_ , force_download=UpperCamelCase_ )
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCAmelCase__ , lowerCAmelCase__ = checkpoint.split("/" )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
elif add_prefix:
lowerCAmelCase__ = checkpoint
lowerCAmelCase__ = dump_path
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCAmelCase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCAmelCase__ = file_path.split(UpperCamelCase_ )[-1][0]
if next_char == "/":
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
lowerCAmelCase__ = tokenizer.save_pretrained(
UpperCamelCase_ , legacy_format=UpperCamelCase_ , filename_prefix=UpperCamelCase_ )
logger.info(F"=> File names {file_names}" )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(UpperCamelCase_ )
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 340 |
import requests
from bs4 import BeautifulSoup
def _a ( UpperCamelCase_ : str = "AAPL" ) -> str:
"""simple docstring"""
lowerCAmelCase__ = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
lowerCAmelCase__ = BeautifulSoup(requests.get(UpperCamelCase_ ).text , "html.parser" )
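    # Yahoo Finance marks the quote block with this atomic-CSS class; the first <span> inside holds the price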
lowerCAmelCase__ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_task_guides.py
a_ = '''src/transformers'''
a_ = '''docs/source/en/tasks'''
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
# Find the start prompt.
lowerCAmelCase__ = 0
while not lines[start_index].startswith(UpperCamelCase_ ):
start_index += 1
start_index += 1
lowerCAmelCase__ = start_index
while not lines[end_index].startswith(UpperCamelCase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
a_ = direct_transformers_import(TRANSFORMERS_PATH)
a_ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
a_ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def _a ( UpperCamelCase_ : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = TASK_GUIDE_TO_MODELS[task_guide]
lowerCAmelCase__ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(UpperCamelCase_ , set() )
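    # merge the model types from the auto mapping with any extra types special-cased for this guide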
lowerCAmelCase__ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def _a ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=False ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = _find_text_in_file(
filename=os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
lowerCAmelCase__ = get_model_list_for_task(UpperCamelCase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
" to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        """Build a singly linked list holding `ints` in ascending order."""
        self.head: Node | None = None
        # Inserting at the head in descending order yields an ascending list.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__(self) -> Iterator[int]:
        """Yield the stored values in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self) -> int:
        """Return the number of nodes in the list."""
        return sum(1 for _ in self)
    def __str__(self) -> str:
        """Render the list as 'v1 -> v2 -> ...'."""
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
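    # Expected output of the print above (the two tuples interleaved and sorted):
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10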
| 340 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        """Configuration for a CamemBERT (RoBERTa-architecture) model."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes for ONNX export; the multiple-choice task adds a `choice` axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
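# Minimal usage sketch (relies only on the standard PretrainedConfig API shown above):
# config = CamembertConfig(num_hidden_layers=6)        # override any default
# config.save_pretrained("./my-camembert")             # writes config.json
# config = CamembertConfig.from_pretrained("./my-camembert")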
| 340 |
import d4rl  # noqa: F401 -- registers the offline-RL envs (e.g. hopper-medium-v2) with gym
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
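# How these knobs are consumed (sketch; based on the value-guided sampling loop):
# `n_samples` candidate plans of length `horizon` are denoised over
# `num_inference_steps`; for the first `n_guide_steps` of each denoising step the
# value network's gradient, scaled by `scale` (optionally by the per-dimension std),
# nudges samples toward high-return plans; guidance stops below `t_grad_cutoff`.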
if __name__ == "__main__":
a_ = '''hopper-medium-v2'''
a_ = gym.make(env_name)
a_ = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
a_ = env.reset()
a_ = 0
a_ = 0
a_ = 1000
a_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
a_ = pipeline(obs, planning_horizon=32)
# execute action in environment
a_, a_, a_, a_ = env.step(denorm_actions)
a_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
a_ = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 340 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
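# enable_full_determinism() seeds torch and pins cuDNN/cuBLAS to deterministic
# kernels so the pixel-level slice assertions below are reproducible across runs.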
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        """A deterministic random image batch of shape (1, 3, 32, 32)."""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        """A tiny conditional UNet for the upscale pipeline."""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model
    @property
    def dummy_vae(self):
        """A tiny AutoencoderKL."""
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model
    @property
    def dummy_text_encoder(self):
        """A tiny CLIP text encoder."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_upscale_fp16(self):
        """Test that stable diffusion upscale also works with fp16 weights."""
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png")
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 340 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, trimming empty lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def get_model_list_for_task(task_guide):
    """Build the Markdown list of model links supported by a given task guide."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally update) the auto-generated model list in a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 340 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}
SPIECE_UNDERLINE = "▁"
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """XLNet sequence format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for sequence A (+sep), 1 for sequence B (+sep), 2 for <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
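# Usage sketch (checkpoint names per the pretrained map above):
# tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# tok("Hello world")  # special tokens are appended at the END (<sep> <cls>); padding side is "left"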
| 340 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow (SentencePiece/WordPiece) checkpoints to fast tokenizer.json files."""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
logger.info(F"Loading tokenizer classes: {tokenizer_names}" )
for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}" )
for checkpoint in checkpoint_names:
logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}" )
# Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
# Save fast tokenizer
logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCAmelCase__ , lowerCAmelCase__ = checkpoint.split("/" )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
elif add_prefix:
lowerCAmelCase__ = checkpoint
lowerCAmelCase__ = dump_path
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = dump_path
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}" )
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"=> File names {file_names}")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 340 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs):
    """Save per-example token lengths so batches can be grouped by length."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id
    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn), desc=str(ds.len_file), )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens
    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
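# Usage sketch (hypothetical script/data names; fire maps CLI args onto the function):
#   python save_len_file.py facebook/bart-large ./data_dir
# writes pickled length lists to train_ds.len_file / val_ds.len_file.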
| 340 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(tokenizer_name, data_dir, max_source_length=1_024, max_target_length=1_024, consider_target=False, **kwargs):
    """Save per-example token lengths so batches can be grouped by length."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id
    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn), desc=str(ds.len_file), )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens
    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 340 | 1 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf YAML config; optionally pretty-print it."""
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    # Parameter names are reconstructed from the use-sites; treat them as a sketch.
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    """Round-trip an image tensor through the VQGAN encoder/decoder."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Import and return `package.module.Class` given as a dotted string."""
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """Instantiate the object described by config["target"] with config["params"]."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Build a model from config, optionally loading weights and moving to GPU/eval."""
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load a Lightning checkpoint (if given) and build the model from it."""
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
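# Example config consumed by instantiate_from_config (illustrative values):
# {"target": "taming.models.vqgan.VQModel", "params": {"embed_dim": 256, "n_embed": 1024}}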
| 340 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=32000, d_model=1024, n_layer=24, n_head=16, d_inner=4096, ff_activation="gelu", untie_r=True, attn_type="bi", initializer_range=0.02, layer_norm_eps=1e-12, dropout=0.1, mem_len=512, reuse_len=None, use_mems_eval=True, use_mems_train=False, bi_data=False, clamp_len=-1, same_length=False, summary_type="last", summary_use_proj=True, summary_activation="tanh", summary_last_dropout=0.1, start_n_top=5, end_n_top=5, pad_token_id=5, bos_token_id=1, eos_token_id=2, **kwargs):
        """Configuration for an XLNet model."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.", FutureWarning, )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 340 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs, [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ], )
@require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10})
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}], )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ], )
@require_tf
    def test_small_model_tf(self):
        # TF video classification is not implemented yet.
        pass
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """List (old_name, new_name) pairs mapping ViT-MSN weights to HF ViT names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
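# Example pair produced above:
# ("module.blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight")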
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused qkv projection matrix into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop classification-head weights from the state dict (in place)."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the MSN projection-head weights, which the HF backbone does not use."""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Convert an original ViT-MSN checkpoint into HF `ViTMSNModel` format."""
    config = ViTMSNConfig()
    config.num_labels = 1_000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 340 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None
class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        """Build a singly linked list holding `ints` in ascending order."""
        self.head: Node | None = None
        # Inserting at the head in descending order yields an ascending list.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)
    def __iter__(self) -> Iterator[int]:
        """Yield the stored values in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self) -> int:
        """Return the number of nodes in the list."""
        return sum(1 for _ in self)
    def __str__(self) -> str:
        """Render the list as 'v1 -> v2 -> ...'."""
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 340 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        """Zero-shot image classification with CLIP-style dual encoders."""
        super().__init__(**kwargs)
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__(self, images, **kwargs):
        """Assign the candidate labels to the image(s) passed in."""
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
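# Usage sketch (any CLIP-style checkpoint works; the model name is illustrative):
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="a photo of a {}")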
| 340 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Constant LR equal to the optimizer's initial LR."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant LR after a linear warmup over `num_warmup_steps`."""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant LR from rules like "1:10,0.1:20,0.01" (multiplier:until_step, ..., final_multiplier)."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    """Linear warmup, then cosine decay over `num_cycles` half-cosine waves."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    """Cosine schedule with `num_cycles` hard restarts after linear warmup."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Polynomial decay from the initial LR down to `lr_end` after linear warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
a_ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def _a ( UpperCamelCase_ : Union[str, SchedulerType] , UpperCamelCase_ : Optimizer , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : int = 1 , UpperCamelCase_ : float = 1.0 , UpperCamelCase_ : int = -1 , ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = SchedulerType(UpperCamelCase_ )
lowerCAmelCase__ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(UpperCamelCase_ , last_epoch=UpperCamelCase_ )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(UpperCamelCase_ , step_rules=UpperCamelCase_ , last_epoch=UpperCamelCase_ )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(UpperCamelCase_ , num_warmup_steps=UpperCamelCase_ , last_epoch=UpperCamelCase_ )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
UpperCamelCase_ , num_warmup_steps=UpperCamelCase_ , num_training_steps=UpperCamelCase_ , num_cycles=UpperCamelCase_ , last_epoch=UpperCamelCase_ , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
UpperCamelCase_ , num_warmup_steps=UpperCamelCase_ , num_training_steps=UpperCamelCase_ , power=UpperCamelCase_ , last_epoch=UpperCamelCase_ , )
return schedule_func(
UpperCamelCase_ , num_warmup_steps=UpperCamelCase_ , num_training_steps=UpperCamelCase_ , last_epoch=UpperCamelCase_ )
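# Minimal usage sketch of the dispatcher above. The public name `get_scheduler`
# and the string scheduler names are assumptions inferred from the
# SchedulerType / TYPE_TO_SCHEDULER_FUNCTION mapping, since the dump renames
# the function itself.
import torch
from torch.optim import AdamW

_params = [torch.nn.Parameter(torch.zeros(2 , 2 ) )]
_optimizer = AdamW(_params , lr=5E-5 )
_scheduler = get_scheduler("linear" , _optimizer , num_warmup_steps=100 , num_training_steps=1_000 )
for _ in range(10 ):
    _optimizer.step()
    _scheduler.step()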
| 340 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class lowercase__ ( _UpperCAmelCase, unittest.TestCase ):
a_ =BartphoTokenizer
a_ =False
a_ =True
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
super().setUp()
lowerCAmelCase__ = ["▁This", "▁is", "▁a", "▁t", "est"]
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"{token} {vocab_tokens[token]}\n" )
lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = "This is a là test"
lowerCAmelCase__ = "This is a<unk><unk> test"
return input_text, output_text
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map )
lowerCAmelCase__ = "This is a là test"
lowerCAmelCase__ = "▁This ▁is ▁a ▁l à ▁t est".split()
lowerCAmelCase__ = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = tokens + [tokenizer.unk_token]
lowerCAmelCase__ = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
| 340 | 1 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ) -> Tuple:
"""simple docstring"""
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _a ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ = SqlDatasetReader(
"dataset" , "sqlite:///" + sqlite_path , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ ).read()
_check_sql_dataset(UpperCamelCase_ , UpperCamelCase_ )
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _a ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase__ = features.copy() if features else default_expected_features
lowerCAmelCase__ = (
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_sql_dataset(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : str ) -> Any:
"""simple docstring"""
    with contextlib.closing(sqlite3.connect(UpperCamelCase_ ) ) as con:
lowerCAmelCase__ = con.cursor()
cur.execute("SELECT * FROM dataset" )
for row in cur:
yield row
@require_sqlalchemy
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "tmp.sql" )
lowerCAmelCase__ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=UpperCamelCase_ ).read()
SqlDatasetWriter(UpperCamelCase_ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write()
lowerCAmelCase__ = iter_sql_file(UpperCamelCase_ )
lowerCAmelCase__ = iter_sql_file(UpperCamelCase_ )
for rowa, rowa in zip(UpperCamelCase_ , UpperCamelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : str ) -> Any:
"""simple docstring"""
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "tmp.sql" )
lowerCAmelCase__ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=UpperCamelCase_ ).read()
SqlDatasetWriter(UpperCamelCase_ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write()
lowerCAmelCase__ = iter_sql_file(UpperCamelCase_ )
lowerCAmelCase__ = iter_sql_file(UpperCamelCase_ )
for rowa, rowa in zip(UpperCamelCase_ , UpperCamelCase_ ):
assert rowa == rowa
@require_sqlalchemy
def _a ( UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = tmp_path / "cache"
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "tmp.sql" )
lowerCAmelCase__ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=UpperCamelCase_ ).read()
with pytest.raises(UpperCamelCase_ ):
SqlDatasetWriter(UpperCamelCase_ , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
| 340 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a_ = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
a_ = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
a_ = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class lowercase__ ( _UpperCAmelCase ):
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ =PRETRAINED_INIT_CONFIGURATION
a_ =["""input_ids""", """attention_mask"""]
a_ =DistilBertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , )-> List[str]:
'''simple docstring'''
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , __UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , __UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , __UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCAmelCase__ = getattr(__UpperCAmelCase , normalizer_state.pop("type" ) )
lowerCAmelCase__ = do_lower_case
lowerCAmelCase__ = strip_accents
lowerCAmelCase__ = tokenize_chinese_chars
lowerCAmelCase__ = normalizer_class(**__UpperCAmelCase )
lowerCAmelCase__ = do_lower_case
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> List[int]:
'''simple docstring'''
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None )-> Tuple[str]:
'''simple docstring'''
lowerCAmelCase__ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
| 340 | 1 |
from collections import defaultdict
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
lowerCAmelCase__ = 1
lowerCAmelCase__ = True
for v in tree[start]:
if v not in visited:
ret += dfs(UpperCamelCase_ )
if ret % 2 == 0:
cuts.append(UpperCamelCase_ )
return ret
def _a ( ) -> Optional[Any]:
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
a_, a_ = 10, 9
a_ = defaultdict(list)
a_ = {}
a_ = []
a_ = 0
a_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
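# Self-contained restatement of the even-forest algorithm above (the global
# state makes the renamed version hard to follow): a post-order pass counts
# child subtrees of even size, each of which can be cut off.
def _even_cuts(edge_list , root=1 ):
    adj = defaultdict(list )
    for u, v in edge_list:
        adj[u].append(v )
        adj[v].append(u )
    removable = 0
    def subtree_size(node , parent ):
        nonlocal removable
        size = 1
        for nxt in adj[node]:
            if nxt != parent:
                child = subtree_size(nxt , node )
                if child % 2 == 0:
                    removable += 1
                size += child
        return size
    subtree_size(root , 0 )
    return removable

assert _even_cuts([(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] ) == 2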
| 340 |
a_ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 340 | 1 |
def _a ( ) -> int:
"""simple docstring"""
return 1
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(UpperCamelCase_ )
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(UpperCamelCase_ )
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(UpperCamelCase_ )
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(UpperCamelCase_ )
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(UpperCamelCase_ )
def _a ( UpperCamelCase_ : int ) -> int:
"""simple docstring"""
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(UpperCamelCase_ )
def _a ( UpperCamelCase_ : int = 200 ) -> int:
"""simple docstring"""
return two_pound(UpperCamelCase_ )
if __name__ == "__main__":
print(solution(int(input().strip())))
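# Equivalent dynamic-programming sketch for the same count (Project Euler 31,
# UK coin combinations for 200p); much faster than the mutual recursion above.
def _count_combinations(target=200 ):
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin , target + 1 ):
            ways[amount] += ways[amount - coin]
    return ways[target]

assert _count_combinations() == 73682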
| 340 |
import collections
import importlib.util
import os
import re
from pathlib import Path
a_ = '''src/transformers'''
# Matches is_xxx_available()
a_ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
a_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
a_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
a_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
a_ = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
a_ = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
a_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
a_ = re.compile(r'''^\s*try:''')
# Catches a line with else:
a_ = re.compile(r'''^\s*else:''')
def _a ( UpperCamelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if _re_test_backend.search(UpperCamelCase_ ) is None:
return None
lowerCAmelCase__ = [b[0] for b in _re_backend.findall(UpperCamelCase_ )]
backends.sort()
return "_and_".join(UpperCamelCase_ )
def _a ( UpperCamelCase_ : Optional[int] ) -> Tuple:
"""simple docstring"""
with open(UpperCamelCase_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase__ = f.readlines()
lowerCAmelCase__ = 0
while line_index < len(UpperCamelCase_ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(UpperCamelCase_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase__ = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(UpperCamelCase_ ):
lowerCAmelCase__ = _re_one_line_import_struct.search(UpperCamelCase_ ).groups()[0]
            lowerCAmelCase__ = re.findall(r"\[([^\]]+)\]" , UpperCamelCase_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
lowerCAmelCase__ = _re_import_struct_key_value.search(UpperCamelCase_ )
if single_line_import_search is not None:
lowerCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
lowerCAmelCase__ = lines[line_index]
if _re_import_struct_add_one.search(UpperCamelCase_ ) is not None:
objects.append(_re_import_struct_add_one.search(UpperCamelCase_ ).groups()[0] )
elif _re_import_struct_add_many.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_import_struct_add_many.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_between_brackets.search(UpperCamelCase_ ) is not None:
lowerCAmelCase__ = _re_between_brackets.search(UpperCamelCase_ ).groups()[0].split(", " )
lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(UpperCamelCase_ ) > 0]
objects.extend(UpperCamelCase_ )
elif _re_quote_object.search(UpperCamelCase_ ) is not None:
objects.append(_re_quote_object.search(UpperCamelCase_ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase__ = []
while (
line_index < len(UpperCamelCase_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase__ = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(UpperCamelCase_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
lowerCAmelCase__ = lines[line_index]
lowerCAmelCase__ = _re_import.search(UpperCamelCase_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
def find_duplicates(UpperCamelCase_ : str ):
return [k for k, v in collections.Counter(UpperCamelCase_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ = []
for key in import_dict_objects.keys():
lowerCAmelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" )
lowerCAmelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ = "base imports" if key == "none" else F"{key} backend"
errors.append(F"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F" {a} in _import_structure but not in TYPE_HINT." )
return errors
def _a ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = []
for root, _, files in os.walk(UpperCamelCase_ ):
if "__init__.py" in files:
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , "__init__.py" )
lowerCAmelCase__ = parse_init(UpperCamelCase_ )
if objects is not None:
lowerCAmelCase__ = analyze_results(*UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase__ = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("\n".join(UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > 0:
raise ValueError("\n\n".join(UpperCamelCase_ ) )
def _a ( ) -> str:
"""simple docstring"""
lowerCAmelCase__ = []
for path, directories, files in os.walk(UpperCamelCase_ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(UpperCamelCase_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(UpperCamelCase_ ) / folder).glob("*.py" ) ) ) == 0:
continue
lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / folder).relative_to(UpperCamelCase_ ) )
lowerCAmelCase__ = short_path.replace(os.path.sep , "." )
submodules.append(UpperCamelCase_ )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase__ = str((Path(UpperCamelCase_ ) / fname).relative_to(UpperCamelCase_ ) )
lowerCAmelCase__ = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(UpperCamelCase_ )
return submodules
a_ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def _a ( ) -> int:
"""simple docstring"""
lowerCAmelCase__ = importlib.util.spec_from_file_location(
"transformers" , os.path.join(UpperCamelCase_ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase__ = spec.loader.load_module()
lowerCAmelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(UpperCamelCase_ ) > 0:
lowerCAmelCase__ = "\n".join(F"- {module}" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
F"{list_of_modules}\n"
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
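# For reference, a minimal runnable mock of the __init__.py shape the regexes
# above are written against; module and object names are hypothetical.
from typing import TYPE_CHECKING as _TYPE_CHECKING

def _is_foo_available():  # stand-in for the real is_xxx_available() probes
    return False

_mock_import_structure = {"configuration_foo": ["FooConfig"]}
try:
    if not _is_foo_available():
        raise ImportError
except ImportError:
    pass  # real inits register dummy objects here
else:
    _mock_import_structure["modeling_foo"] = ["FooModel"]
if _TYPE_CHECKING:
    pass  # the TYPE_CHECKING half mirrors _import_structure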
| 340 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , )-> Any:
'''simple docstring'''
super().__init__(
__UpperCAmelCase , split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , num_proc=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ = field
lowerCAmelCase__ = path_or_paths if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else {self.split: path_or_paths}
lowerCAmelCase__ = Json(
cache_dir=__UpperCAmelCase , data_files=__UpperCAmelCase , features=__UpperCAmelCase , field=__UpperCAmelCase , **__UpperCAmelCase , )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
if self.streaming:
lowerCAmelCase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
self.builder.download_and_prepare(
download_config=__UpperCAmelCase , download_mode=__UpperCAmelCase , verification_mode=__UpperCAmelCase , base_path=__UpperCAmelCase , num_proc=self.num_proc , )
lowerCAmelCase__ = self.builder.as_dataset(
split=self.split , verification_mode=__UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class lowercase__ :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , )-> List[str]:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
lowerCAmelCase__ = dataset
lowerCAmelCase__ = path_or_buf
lowerCAmelCase__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowerCAmelCase__ = num_proc
lowerCAmelCase__ = "utf-8"
lowerCAmelCase__ = to_json_kwargs
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.to_json_kwargs.pop("path_or_buf" , __UpperCAmelCase )
lowerCAmelCase__ = self.to_json_kwargs.pop("orient" , "records" )
lowerCAmelCase__ = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
lowerCAmelCase__ = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
lowerCAmelCase__ = self.to_json_kwargs.pop("compression" , __UpperCAmelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"`datasets` currently does not support {compression} compression" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=__UpperCAmelCase ) as buffer:
lowerCAmelCase__ = self._write(file_obj=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"The compression parameter is not supported when writing to a buffer, but compression={compression}"
" was passed. Please provide a local path instead." )
lowerCAmelCase__ = self._write(
file_obj=self.path_or_buf , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **self.to_json_kwargs )
return written
def UpperCAmelCase ( self , __UpperCAmelCase )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = args
lowerCAmelCase__ = query_table(
table=self.dataset.data , key=slice(__UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
lowerCAmelCase__ = batch.to_pandas().to_json(
path_or_buf=__UpperCAmelCase , orient=__UpperCAmelCase , lines=__UpperCAmelCase , index=__UpperCAmelCase , **__UpperCAmelCase )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase , )-> int:
'''simple docstring'''
lowerCAmelCase__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
lowerCAmelCase__ = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(__UpperCAmelCase )
else:
lowerCAmelCase__ , lowerCAmelCase__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __UpperCAmelCase , __UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(__UpperCAmelCase )
return written
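# Hedged round-trip sketch, assuming the two classes above correspond to
# datasets' JsonDatasetReader / JsonDatasetWriter (their names are erased by
# the renaming in this dump).
from datasets import Dataset

_ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]} )
JsonDatasetWriter(_ds , "tmp.jsonl" , num_proc=1 ).write()
_reloaded = JsonDatasetReader("tmp.jsonl" ).read()
assert _reloaded.column_names == _ds.column_names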
| 340 |
from __future__ import annotations
import os
from collections.abc import Mapping
a_ = tuple[int, int]
class lowercase__ :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase )-> None:
'''simple docstring'''
lowerCAmelCase__ = vertices
lowerCAmelCase__ = {
(min(__UpperCAmelCase ), max(__UpperCAmelCase )): weight for edge, weight in edges.items()
}
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> None:
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCAmelCase__ = weight
def UpperCAmelCase ( self )-> Graph:
'''simple docstring'''
lowerCAmelCase__ = Graph({min(self.vertices )} , {} )
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowerCAmelCase__ = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCAmelCase__ = edge
lowerCAmelCase__ = weight
subgraph.add_edge(__UpperCAmelCase , __UpperCAmelCase )
return subgraph
def _a ( UpperCamelCase_ : str = "p107_network.txt" ) -> int:
"""simple docstring"""
lowerCAmelCase__ = os.path.abspath(os.path.dirname(UpperCamelCase_ ) )
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = {}
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
with open(UpperCamelCase_ ) as f:
lowerCAmelCase__ = f.read().strip().split("\n" )
lowerCAmelCase__ = [line.split("," ) for line in data]
for edgea in range(1 , len(UpperCamelCase_ ) ):
for edgea in range(UpperCamelCase_ ):
            if adjacency_matrix[edgea][edgea] != "-":
                lowerCAmelCase__ = int(adjacency_matrix[edgea][edgea] )
lowerCAmelCase__ = Graph(set(range(len(UpperCamelCase_ ) ) ) , UpperCamelCase_ )
lowerCAmelCase__ = graph.prims_algorithm()
lowerCAmelCase__ = sum(graph.edges.values() )
lowerCAmelCase__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
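# Tiny hand-checkable exercise of the class above (assumed to be the `Graph`
# that its own methods reference; edge weights are made up): the minimum
# spanning tree of a 3-cycle drops its heaviest edge.
_g = Graph({1, 2, 3} , {(1, 2): 1, (2, 3): 2, (1, 3): 3} )
_mst = _g.prims_algorithm()
assert sum(_mst.edges.values() ) == 3  # keeps (1,2) and (2,3), drops the weight-3 edge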
| 340 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _a ( UpperCamelCase_ : str ) -> Dict:
"""simple docstring"""
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(UpperCamelCase_ ):
return ext
raise Exception(
F"Unable to determine file format from file extension {path}. "
F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def _a ( UpperCamelCase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowerCAmelCase__ = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
lowerCAmelCase__ = PipelineDataFormat.from_str(
format=UpperCamelCase_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(UpperCamelCase_ , UpperCamelCase_ )
class lowercase__ ( _UpperCAmelCase ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = nlp
lowerCAmelCase__ = reader
@staticmethod
def UpperCAmelCase ( __UpperCAmelCase )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=__UpperCAmelCase , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=__UpperCAmelCase , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=__UpperCAmelCase , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=__UpperCAmelCase , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=__UpperCAmelCase , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=__UpperCAmelCase , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=__UpperCAmelCase , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=__UpperCAmelCase , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self._nlp, []
for entry in self._reader:
lowerCAmelCase__ = nlp(**__UpperCAmelCase ) if self._reader.is_multi_columns else nlp(__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
outputs.append(__UpperCAmelCase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowerCAmelCase__ = self._reader.save_binary(__UpperCAmelCase )
logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}" )
else:
self._reader.save(__UpperCAmelCase )
| 340 |
from collections import defaultdict
from math import gcd
def _a ( UpperCamelCase_ : int = 1_500_000 ) -> int:
"""simple docstring"""
    lowerCAmelCase__ = defaultdict(int )
lowerCAmelCase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , UpperCamelCase_ , 2 ):
if gcd(UpperCamelCase_ , UpperCamelCase_ ) > 1:
continue
lowerCAmelCase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(UpperCamelCase_ , limit + 1 , UpperCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
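# Sanity check of the Euclid parametrisation the sieve above relies on:
# coprime m > n of opposite parity give a primitive triple with perimeter
# 2 * m * (m + n).
_m, _n = 2, 1
_a_leg, _b_leg, _hyp = _m * _m - _n * _n, 2 * _m * _n, _m * _m + _n * _n
assert (_a_leg, _b_leg, _hyp) == (3, 4, 5)
assert _a_leg + _b_leg + _hyp == 2 * _m * (_m + _n) == 12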
| 340 | 1 |
a_ = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 340 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( _UpperCAmelCase ):
a_ =["""image_processor""", """tokenizer"""]
a_ ="""LayoutLMv2ImageProcessor"""
a_ =("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> Tuple:
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , )-> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
lowerCAmelCase__ = self.image_processor(images=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ = features["words"]
lowerCAmelCase__ = self.tokenizer(
text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , stride=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_overflowing_tokens=__UpperCAmelCase , return_special_tokens_mask=__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , return_length=__UpperCAmelCase , verbose=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase , )
# add pixel values
lowerCAmelCase__ = features.pop("pixel_values" )
if return_overflowing_tokens is True:
lowerCAmelCase__ = self.get_overflowing_images(__UpperCAmelCase , encoded_inputs["overflow_to_sample_mapping"] )
lowerCAmelCase__ = images
return encoded_inputs
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__UpperCAmelCase ) != len(__UpperCAmelCase ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
F" {len(__UpperCAmelCase )} and {len(__UpperCAmelCase )}" )
return images_with_overflow
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase )-> Dict:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
| 340 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def _a ( UpperCamelCase_ : float ) -> float:
"""simple docstring"""
if num <= 0:
raise ValueError("math domain error" )
return quad(UpperCamelCase_ , 0 , UpperCamelCase_ , args=(UpperCamelCase_) )[0]
def _a ( UpperCamelCase_ : float , UpperCamelCase_ : float ) -> float:
"""simple docstring"""
return math.pow(UpperCamelCase_ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
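# The renaming above obscures which function is the integrand and which is the
# wrapper, so here is a self-contained restatement of the same quadrature,
# checked against Gamma(5) = 4! = 24.
from math import exp, factorial, isclose
from numpy import inf as _inf
from scipy.integrate import quad as _quad

def _gamma_quad(z ):
    # Gamma(z) = integral from 0 to inf of x^(z-1) * e^(-x) dx
    return _quad(lambda x: x ** (z - 1 ) * exp(-x ) , 0 , _inf )[0]

assert isclose(_gamma_quad(5 ) , factorial(4 ) , rel_tol=1E-6 )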
| 340 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase__ ( unittest.TestCase ):
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase__ = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCAmelCase__ = {"unk_token": "<unk>"}
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCAmelCase ) )
lowerCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Any:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCAmelCase )
def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
        lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCAmelCase__ = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCAmelCase__ = self.get_image_processor(do_normalize=__UpperCAmelCase )
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = processor(images=__UpperCAmelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=__UpperCAmelCase , return_tensors="np" )
lowerCAmelCase__ = tokenizer(__UpperCAmelCase , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = [["cat", "nasa badge"], ["person"]]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = len(__UpperCAmelCase )
lowerCAmelCase__ = max([len(__UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = "google/owlvit-base-patch32"
lowerCAmelCase__ = OwlViTProcessor.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = ["cat", "nasa badge"]
lowerCAmelCase__ = processor(text=__UpperCAmelCase )
lowerCAmelCase__ = 16
lowerCAmelCase__ = inputs["input_ids"]
lowerCAmelCase__ = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(images=__UpperCAmelCase , query_images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = OwlViTProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
| 340 | 1 |
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int ) -> str:
"""simple docstring"""
return "\n".join(
F"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 340 |
from __future__ import annotations
from cmath import sqrt
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowerCAmelCase__ = b * b - 4 * a * c
lowerCAmelCase__ = (-b + sqrt(UpperCamelCase_ )) / (2 * a)
lowerCAmelCase__ = (-b - sqrt(UpperCamelCase_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = quadratic_roots(a=5 , b=6 , c=1 )
print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
main()
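# Worked check of the formula above: 5x^2 + 6x + 1 = (5x + 1)(x + 1), so the
# discriminant is 16 and the roots are -0.2 and -1. The name `quadratic_roots`
# is taken from the main() call above.
assert set(quadratic_roots(a=5 , b=6 , c=1 ) ) == {-0.2, -1.0}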
| 340 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
BlipImageProcessor,
OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _a ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("RGB" )
return image
def _a ( UpperCamelCase_ : int ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict (the original attention has no k bias, so pad with zeros)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
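    # Hypothetical invocation (the file and output paths are illustrative, not from the script):
    #   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2-opt-2.7b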
| 340 |
import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate an n-qubit quantum Fourier transform circuit."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        # Hadamard on the highest remaining qubit, then controlled phase rotations below it
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # reverse the qubit order with swaps
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
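# Sanity check on the output: the QFT of the all-zero input state is a uniform superposition,
# so for 3 qubits each of the 8 bitstrings should appear roughly 10_000 / 8 = 1250 times.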
| 340 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
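# e.g. get_pairs(("h", "e", "l", "l", "o</w>")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}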
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # merge the adjacent pair with the lowest merge rank first
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into BPE tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 340 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
'''simple docstring'''
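        # each sample is decoded by all three heads (char / BPE / WordPiece);
        # the string with the highest sequence confidence wins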
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
'''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char eos token id
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe eos token id
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wordpiece eos token id
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        """Convert char token-id sequences to strings, dropping the spaces the tokenizer inserts."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Convert BPE token-id sequences to strings."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Convert WordPiece token-id sequences to strings, dropping inserted spaces."""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 340 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
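# e.g. ids_tensor((2, 5), vocab_size=99) yields an int32 array of shape (2, 5) with values in [0, 98]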
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = False
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = True
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = False
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = False
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = True
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 0.8
lowerCAmelCase__ = 10
lowerCAmelCase__ = 0.3
lowerCAmelCase__ = 1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self )-> Tuple:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
lowerCAmelCase__ = max_length
lowerCAmelCase__ = 2
lowerCAmelCase__ = 1
lowerCAmelCase__ = 8
lowerCAmelCase__ = 9
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCAmelCase__ = False
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCAmelCase__ = True
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowerCAmelCase__ = attention_mask.at[(0, 0)].set(0 )
lowerCAmelCase__ = 2
lowerCAmelCase__ = max_length
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ = model_class(__UpperCAmelCase )
lowerCAmelCase__ = model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCAmelCase )
lowerCAmelCase__ = jit(model.generate )
lowerCAmelCase__ = jit_generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 340 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
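# Map of submodule -> public names; consumed lazily by `_LazyModule` below so that heavy
# backends (torch, TF, tokenizers) are imported only when their symbols are actually accessed.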
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 340 | 1 |