| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1) |
def solution(length: int = 50) -> int:
    """Count the tilings of a row of `length` units, dynamic-programming style,
    where tiles of length 2, 3 and 4 may be placed at any offset."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER on the decoded predictions, print them, and optionally log outputs to text files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize the target text the same way the training data was normalized."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
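# Hypothetical invocation of main() without the CLI entry point. The model and
# dataset names below are illustrative placeholders, not pinned by this script;
# the equivalent command line would be:
#
#     python eval.py --model_id <hub-model> --dataset <hub-dataset> \
#         --config en --split test --log_outputs
from argparse import Namespace

example_args = Namespace(
    model_id="facebook/wav2vec2-base-960h",
    dataset="mozilla-foundation/common_voice_8_0",
    config="en",
    split="test",
    chunk_length_s=None,
    stride_length_s=None,
    log_outputs=True,
    device=None,
)
# main(example_args)  # uncomment to run outside the argparse entry point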
| 671 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    """BERT-based tokenizer for the DPR context encoder."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    """BERT-based tokenizer for the DPR question encoder."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # plain question encoding: defer entirely to the underlying BERT tokenizer
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Return the best answer spans across passages, ordered by descending relevance."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Score all candidate spans and keep the top non-overlapping ones."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """BERT-based tokenizer for the DPR reader, with span-decoding helpers."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 671 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen consecutive digits in the 1000-digit number `n`."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 671 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
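# A typical launch for summarization fine-tuning. The checkpoint and data paths
# below are illustrative placeholders, not pinned by this script:
#
#     python finetune_trainer.py \
#         --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#         --data_dir ./cnn_dm --output_dir ./output \
#         --do_train --do_eval --task summarization \
#         --predict_with_generate --n_val 500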
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding SDE scheduler with a predictor (`step_pred`) and a corrector (`step_correct`)."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor step: propagate the sample with the reverse-time SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Corrector step: nudge the sample along the model's score estimate."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
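# Minimal predictor-corrector sampling sketch with this scheduler. `score_model`
# is a hypothetical network returning a score estimate with the sample's shape;
# the image shape and step count below are placeholders, not part of this file.
def sample_sde_ve(score_model, num_inference_steps: int = 100, shape=(1, 3, 32, 32)):
    scheduler = ScoreSdeVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)

    sample = torch.randn(*shape) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        # one or more corrector steps, then a single predictor step
        for _ in range(scheduler.config.correct_steps):
            model_output = score_model(sample, t)
            sample = scheduler.step_correct(model_output, sample).prev_sample
        model_output = score_model(sample, t)
        output = scheduler.step_pred(model_output, t, sample)
        sample = output.prev_sample
    return output.prev_sample_mean  # the mean is typically used as the final sample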
| 671 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """
    Wraps a BridgeTower image processor and a RoBERTa tokenizer into a single
    processor that prepares both text and image inputs for the model.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
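# Usage sketch (requires Pillow, requests, and Hub access; the checkpoint name
# and image URL are illustrative):
import requests
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(image, "two cats sleeping on a couch", return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. attention_mask, input_ids, pixel_mask, pixel_values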
| 671 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Count the perimeters up to `limit` that can form exactly one integer-sided
    right triangle, generating primitive triples with Euclid's formula."""
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
| 671 | 1 |
from ..utils import DummyObject, requires_backends
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
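# Quick sanity check for the two sequence-pair helpers above (a sketch; the ids
# 101/102/7592/2088 are the [CLS]/[SEP]/"hello"/"world" ids of bert-base-uncased):
#   tok = BertTokenizerFast.from_pretrained('''bert-base-uncased''')
#   tok.build_inputs_with_special_tokens([7592], [2088])      # [101, 7592, 102, 2088, 102]
#   tok.create_token_type_ids_from_sequences([7592], [2088])  # [0, 0, 0, 1, 1]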
| 671 | 1 |
def is_isogram(string: str) -> bool:
    '''simple docstring'''
    if not all(x.isalpha() for x in string):
        raise ValueError('''String must only contain alphabetic characters.''')

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
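# Runtime behaviour of the lazy-module shim above (illustrative): importing the
# config class only loads the lightweight configuration module, while the first
# access to a modeling class is what actually triggers the torch-dependent import.
#   from transformers import UniSpeechConfig   # cheap: configuration only
#   from transformers import UniSpeechModel    # pulls in modeling_unispeech + torch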
| 671 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """simple docstring"""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        '''simple docstring'''
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        '''simple docstring'''
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        '''simple docstring'''
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x, to_plot_y, color='''blue''', label='''Curve of Degree ''' + str(self.degree), )
        plt.scatter(x, y, color='''red''', label='''Control Points''')
        plt.legend()
        plt.show()
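# Quick numeric check (assuming the class above): for the degree-1 curve through
# (1, 2) and (3, 5), t = 0.5 weights both control points equally.
#   curve = BezierCurve([(1, 2), (3, 5)])
#   curve.basis_function(0.5)         # [0.5, 0.5]
#   curve.bezier_curve_function(0.5)  # (2.0, 3.5)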
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset, **kwargs):
    '''simple docstring'''
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):
    '''simple docstring'''
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    '''simple docstring'''
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string'''), '''numbers''': datasets.Value('''float32''')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow'''), features, num_examples=SPEED_TEST_N_EXAMPLES)

        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['''text'''])

        times['''map identity'''] = map(dataset)

        times['''map identity batched'''] = map(dataset, batched=True)

        times['''map no-op batched'''] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type='''numpy'''):
            times['''map no-op batched numpy'''] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type='''pandas'''):
            times['''map no-op batched pandas'''] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type='''torch''', columns='''numbers'''):
            times['''map no-op batched pytorch'''] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type='''tensorflow''', columns='''numbers'''):
            times['''map no-op batched tensorflow'''] = map(dataset, function=lambda x: None, batched=True)

        times['''map fast-tokenizer batched'''] = map(dataset, function=tokenize, batched=True)

        times['''filter'''] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, '''wb''') as f:
        f.write(json.dumps(times).encode('''utf-8'''))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
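# `get_duration` is imported from a local `utils` module that is not part of this
# file. A minimal stand-in consistent with how it is used above (a sketch, not the
# benchmark suite's actual implementation):
#
#   import time
#   from functools import wraps
#
#   def get_duration(func):
#       @wraps(func)
#       def wrapper(*args, **kwargs):
#           start = time.perf_counter()
#           func(*args, **kwargs)
#           return time.perf_counter() - start   # seconds, stored in `times`
#       return wrapper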
| 671 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
A__ : Tuple = logging.getLogger(__name__)
A__ : Any = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
A__ : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = field(
default=A__ ,metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} ,)
lowercase__ = field(
default=A__ ,metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(A__ )} ,)
lowercase__ = field(
default=A__ ,metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} ,)
lowercase__ = field(
default=A__ ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ = field(
default=A__ ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase__ = field(
default=A__ ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
lowercase__ = field(
default=A__ ,metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} ,)
lowercase__ = field(
default="""main""" ,metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} ,)
lowercase__ = field(
default=A__ ,metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} ,)
    def __post_init__(self):
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = field(
default=A__ ,metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
lowercase__ = field(
default=A__ ,metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowercase__ = field(default=A__ ,metadata={"""help""": """The input training data file (a text file)."""} )
lowercase__ = field(
default=A__ ,metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} ,)
lowercase__ = field(
default=A__ ,metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} ,)
lowercase__ = field(
default=A__ ,metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} ,)
lowercase__ = field(
default=A__ ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
lowercase__ = field(
default=5 ,metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} ,)
lowercase__ = field(
default=A__ ,metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} ,)
lowercase__ = field(
default=A__ ,metadata={"""help""": """The number of processes to use for the preprocessing."""} ,)
lowercase__ = field(
default=0.15 ,metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
lowercase__ = field(
default=A__ ,metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} ,)
    def __post_init__(self):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split('''.''')[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split('''.''')[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    '''simple docstring'''
    with open(ref_file, '''r''', encoding='''utf-8''') as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['''chinese_ref'''] = refs
    return Dataset.from_dict(dataset_dict)
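# For reference, each line of a whole-word-masking ref file is a JSON list with the
# positions of sub-tokens that continue a word; e.g. a (hypothetical) line "[2, 3]"
# marks tokens 2 and 3 of that example as "##"-style continuations so the collator
# can mask the whole word at once.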
def main():
    '''simple docstring'''
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''', training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets['''validation'''] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=F"""train[:{data_args.validation_split_percentage}%]""", )
            datasets['''train'''] = load_dataset(
                data_args.dataset_name, data_args.dataset_config_name, split=F"""train[{data_args.validation_split_percentage}%:]""", )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files['''train'''] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['''validation'''] = data_args.validation_file
        extension = data_args.train_file.split('''.''')[-1]
        if extension == "txt":
            extension = '''text'''
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''')
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""")
            config.update_from_string(model_args.config_overrides)
            logger.info(F"""New config: {config}""")

    tokenizer_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''use_fast''': model_args.use_fast_tokenizer,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
            '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info('''Training new model from scratch''')
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets['''train'''].column_names
    else:
        column_names = datasets['''validation'''].column_names
    text_column_name = '''text''' if '''text''' in column_names else column_names[0]

    padding = '''max_length''' if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples['''text'''] = [line for line in examples['''text'''] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples['''text'''], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets['''train'''] = add_chinese_references(tokenized_datasets['''train'''], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets['''validation'''] = add_chinese_references(
            tokenized_datasets['''validation'''], data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None, eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, '''train_results.txt''')
        if trainer.is_world_process_zero():
            with open(output_train_file, '''w''') as writer:
                logger.info('''***** Train results *****''')
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(F"""  {key} = {value}""")
                    writer.write(F"""{key} = {value}\n""")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, '''trainer_state.json'''))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['''eval_loss'''])
        results['''perplexity'''] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, '''eval_results_mlm_wwm.txt''')
        if trainer.is_world_process_zero():
            with open(output_eval_file, '''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in sorted(results.items()):
                    logger.info(F"""  {key} = {value}""")
                    writer.write(F"""{key} = {value}\n""")

    return results
def _mp_fn(index):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
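# Example invocation (illustrative only -- file names and the model are placeholders):
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file path/to/train.txt \
#       --train_ref_file path/to/train_ref.txt \
#       --do_train --do_eval \
#       --output_dir ./output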
| 671 |
class RadixNode:
    """simple docstring"""

    def __init__(self, prefix: str = "", is_leaf: bool = False):
        '''simple docstring'''
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str):
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
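    # Example: a node with prefix "banana" matched against the word "bandanas"
    # returns ("ban", "ana", "danas") -- the common part, the unmatched rest of
    # the node's prefix, and the unmatched rest of the word.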
    def insert_many(self, words: list[str]):
        '''simple docstring'''
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        '''simple docstring'''
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0):
        '''simple docstring'''
        if self.prefix != "":
            print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''')

        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    '''simple docstring'''
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find('''bandanas''')
    assert not root.find('''apps''')
    root.delete('''all''')
    assert not root.find('''all''')
    root.delete('''banana''')
    assert not root.find('''banana''')
    assert root.find('''bananas''')

    return True


def pytests() -> None:
    '''simple docstring'''
    assert test_trie()


def main() -> None:
    '''simple docstring'''
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words)

    print('''Words:''', words)
    print('''Tree:''')
    root.print_tree()
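# For the word list above, main() prints a tree of roughly this shape (sibling
# order follows insertion order; a sketch, not captured output):
#
#   - b
#   -- an
#   --- ana (leaf)
#   ---- s (leaf)
#   --- d (leaf)
#   ---- ana (leaf)
#   ----- s (leaf)
#   -- east (leaf)
#   - a
#   -- pple (leaf)
#   -- ll (leaf)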
if __name__ == "__main__":
main()
| 671 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
A__ : List[Any] = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule):
    """simple docstring"""

    def __init__(self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs, ):
        '''simple docstring'''
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({'''num_labels''': num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), F"""model config doesn't have a `{p}` attribute"""
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path, from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        '''simple docstring'''
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        '''simple docstring'''
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps())
        scheduler = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
        return scheduler

    def configure_optimizers(self):
        '''simple docstring'''
        model = self.model
        no_decay = ['''bias''', '''LayerNorm.weight''']
        optimizer_grouped_parameters = [
            {
                '''params''': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                '''weight_decay''': self.hparams.weight_decay,
            },
            {
                '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                '''weight_decay''': 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False)
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        '''simple docstring'''
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        '''simple docstring'''
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        '''simple docstring'''
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        '''simple docstring'''
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader('''train''', self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        '''simple docstring'''
        raise NotImplementedError('''You must implement this for your task''')

    def train_dataloader(self):
        '''simple docstring'''
        return self.train_loader

    def val_dataloader(self):
        '''simple docstring'''
        return self.get_dataloader('''dev''', self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        '''simple docstring'''
        return self.get_dataloader('''test''', self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        '''simple docstring'''
        return os.path.join(
            self.hparams.data_dir, '''cached_{}_{}_{}'''.format(
                mode, list(filter(None, self.hparams.model_name_or_path.split('''/'''))).pop(), str(self.hparams.max_seq_length), ), )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        '''simple docstring'''
        save_path = self.output_dir.joinpath('''best_tfmr''')
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        '''simple docstring'''
        parser.add_argument(
            '''--model_name_or_path''', default=None, type=str, required=True, help='''Path to pretrained model or model identifier from huggingface.co/models''', )
        parser.add_argument(
            '''--config_name''', default='''''', type=str, help='''Pretrained config name or path if not the same as model_name''' )
        parser.add_argument(
            '''--tokenizer_name''', default=None, type=str, help='''Pretrained tokenizer name or path if not the same as model_name''', )
        parser.add_argument(
            '''--cache_dir''', default=str(Path(root_dir).parent / '''test_run''' / '''cache'''), type=str, help='''Where do you want to store the pre-trained models downloaded from huggingface.co''', )
        parser.add_argument(
            '''--encoder_layerdrop''', type=float, help='''Encoder layer dropout probability (Optional). Goes into model.config''', )
        parser.add_argument(
            '''--decoder_layerdrop''', type=float, help='''Decoder layer dropout probability (Optional). Goes into model.config''', )
        parser.add_argument(
            '''--dropout''', type=float, help='''Dropout probability (Optional). Goes into model.config''', )
        parser.add_argument(
            '''--attention_dropout''', type=float, help='''Attention dropout probability (Optional). Goes into model.config''', )
        parser.add_argument('''--learning_rate''', default=5E-5, type=float, help='''The initial learning rate for Adam.''' )
        parser.add_argument(
            '''--lr_scheduler''', default='''linear''', choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help='''Learning rate scheduler''', )
        parser.add_argument('''--weight_decay''', default=0.0, type=float, help='''Weight decay if we apply some.''' )
        parser.add_argument('''--adam_epsilon''', default=1E-8, type=float, help='''Epsilon for Adam optimizer.''' )
        parser.add_argument('''--warmup_steps''', default=0, type=int, help='''Linear warmup over warmup_steps.''' )
        parser.add_argument('''--num_workers''', default=4, type=int, help='''kwarg passed to DataLoader''' )
        parser.add_argument('''--num_train_epochs''', dest='''max_epochs''', default=3, type=int )
        parser.add_argument('''--train_batch_size''', default=32, type=int )
        parser.add_argument('''--eval_batch_size''', default=32, type=int )
        parser.add_argument('''--adafactor''', action='''store_true''' )
class InitCallback(pl.Callback):
    """simple docstring"""

    def on_sanity_check_start(self, trainer, pl_module):
        '''simple docstring'''
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    """simple docstring"""

    def on_after_backward(self, trainer, pl_module):
        '''simple docstring'''
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    """simple docstring"""

    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        lr_scheduler = trainer.lr_schedulers[0]['''scheduler''']
        lrs = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        '''simple docstring'''
        rank_zero_info('''***** Validation results *****''')
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('''{} = {}\n'''.format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        '''simple docstring'''
        rank_zero_info('''***** Test results *****''')
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, '''test_results.txt''')
        with open(output_test_results_file, '''w''') as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('''{} = {}\n'''.format(key, str(metrics[key])))
                    writer.write('''{} = {}\n'''.format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    '''simple docstring'''
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        '''--output_dir''', default=str(Path(root_dir).parent / '''test_run''' / '''model_checkpoints'''), type=str, help='''The output directory where the model predictions and checkpoints will be written.''', )
    parser.add_argument(
        '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''', )
    parser.add_argument(
        '''--fp16_opt_level''', type=str, default='''O2''', help=(
            '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ), )
    parser.add_argument('''--n_tpu_cores''', dest='''tpu_cores''', type=int )
    parser.add_argument('''--max_grad_norm''', dest='''gradient_clip_val''', default=1.0, type=float, help='''Max gradient norm''' )
    parser.add_argument('''--do_train''', action='''store_true''', help='''Whether to run training.''' )
    parser.add_argument('''--do_predict''', action='''store_true''', help='''Whether to run predictions on the test set.''' )
    parser.add_argument(
        '''--gradient_accumulation_steps''', dest='''accumulate_grad_batches''', type=int, default=1, help='''Number of updates steps to accumulate before performing a backward/update pass.''', )
    parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''' )
    parser.add_argument(
        '''--data_dir''', default=str(Path(root_dir).parent / '''test_run''' / '''dummy-train-data'''), type=str, help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''', )
def generic_train(
    model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs, ):
    '''simple docstring'''
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix='''checkpoint''', monitor='''val_loss''', mode='''min''', save_top_k=1)
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params['''precision'''] = 16
    if args.gpus > 1:
        train_params['''accelerator'''] = '''auto'''
        train_params['''strategy'''] = '''ddp'''
    train_params['''accumulate_grad_batches'''] = args.accumulate_grad_batches
    train_params['''profiler'''] = None
    train_params['''devices'''] = '''auto'''

    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )

    if args.do_train:
        trainer.fit(model)

    else:
        print('''RAG modeling tests with new set functions successfuly executed!''')
    return trainer
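# Sketch of how a task module plugs into this base class (the subclass name is
# hypothetical; BaseTransformer, add_generic_args and generic_train are defined above):
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = BaseTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = MyTaskTransformer(args)   # subclass implementing get_dataloader()
#   trainer = generic_train(model, args)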
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    """simple docstring"""

    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        '''simple docstring'''
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')

        image_processor_map = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        '''simple docstring'''
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)
    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''test'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''test'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['''pixel_values''', '''labels'''])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(''' ''', '''''') for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)
    def test_model_input_names(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
    def test_batch_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50_257)
        wp_input = torch.randn(1, 27, 30_522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''])
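    # Note on the shapes above: 38, 50_257 and 30_522 are the character, BPE (GPT-2)
    # and WordPiece (BERT) vocabulary sizes of MGP-STR's three prediction heads;
    # batch_decode fuses the three heads and keeps the best-scoring string per sample.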
| 671 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
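# Background for the tests below: RoFormer swaps absolute position embeddings for
# rotary position embeddings -- TFRoFormerSinusoidalPositionalEmbedding builds the
# sinusoid table and TFRoFormerSelfAttention applies it by rotating query/key vectors.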
class TFRoFormerModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFRoFormerModel(config=config)

        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        prediction_scores = model(inputs)['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase__ = (
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : str, lowerCamelCase : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = TFRoFormerModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, hidden_size=37 )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
@slow
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(lowerCamelCase )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase__ = model(lowerCamelCase )[0]
# TODO Replace vocab size
lowercase__ = 50_000
lowercase__ = [1, 6, vocab_size]
self.assertEqual(output.shape, lowerCamelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase__ = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3], lowerCamelCase, atol=1E-4 )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = 1E-4
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = tf.constant([[4, 10]] )
lowercase__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6 )
lowercase__ = emba(input_ids.shape )
lowercase__ = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, atol=self.tolerance )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
lowercase__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512 )
emba([2, 16, 512] )
lowercase__ = emba.weight[:3, :5]
tf.debugging.assert_near(lowerCamelCase, lowerCamelCase, atol=self.tolerance )
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = 1E-4
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
# 2,12,16,64
lowercase__ = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.floataa ), shape=(2, 12, 16, 64) ) / 100
lowercase__ = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.floataa ), shape=(2, 12, 16, 64) ) / 100
lowercase__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64 )
lowercase__ = embed_positions([2, 16, 768] )[None, None, :, :]
lowercase__ , lowercase__ = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
lowercase__ = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8], lowerCamelCase, atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8], lowerCamelCase, atol=self.tolerance )
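# Illustrative sketch (not from the upstream test file): the assertions above
# exercise TFRoFormerSelfAttention.apply_rotary_position_embeddings. Assuming
# the usual RoFormer layout, where the sinusoidal embedding packs sin terms in
# the first half of the last dim and cos terms in the second half, the rotation
# can be written in plain NumPy as:
import numpy as np

def _rotate_every_two(x):
    # (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    return np.stack((-x_odd, x_even), axis=-1).reshape(x.shape)

def _apply_rotary_embeddings(x, sinusoidal_pos):
    sin, cos = np.split(sinusoidal_pos, 2, axis=-1)
    sin_pos = np.repeat(sin, 2, axis=-1)  # (s0, s0, s1, s1, ...)
    cos_pos = np.repeat(cos, 2, axis=-1)
    return x * cos_pos + _rotate_every_two(x) * sin_pos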
| 671 |
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
lowercase__ = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value
def a ( lowerCamelCase_ = 1777 , lowerCamelCase_ = 1855 , lowerCamelCase_ = 8 ):
'''simple docstring'''
lowercase__ = base
for _ in range(1 , lowerCamelCase_ ):
lowercase__ = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits )
return result
if __name__ == "__main__":
print(F"{solution() = }")
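# Illustrative note (not part of the upstream solution): `solution()` computes
# the last `digits` digits of the tetration base^^height (Project Euler 188:
# 1777^^1855 mod 10**8) by iterating result = base**result mod 10**digits,
# with `_modexpt` doing exponentiation by squaring. Quick sanity checks:
#
#   _modexpt(3, 7, 1_000) == pow(3, 7, 1_000) == 187
#   solution(base=3, height=2, digits=4) == pow(3, 3, 10**4) == 27   # 3^^2 = 3**3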
| 671 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A__ : Union[str, Any] = pytest.mark.integration
@require_faiss
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def lowercase__ ( self : str ):
'''simple docstring'''
import faiss
lowercase__ = self._create_dummy_dataset()
lowercase__ = dset.map(
lambda lowerCamelCase, lowerCamelCase : {"vecs": i * np.ones(5, dtype=np.floataa )}, with_indices=lowerCamelCase, keep_in_memory=lowerCamelCase )
lowercase__ = dset.add_faiss_index('''vecs''', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT )
lowercase__ , lowercase__ = dset.get_nearest_examples('''vecs''', np.ones(5, dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def lowercase__ ( self : str ):
'''simple docstring'''
import faiss
lowercase__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name='''vecs''', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, )
lowercase__ , lowercase__ = dset.get_nearest_examples('''vecs''', np.ones(5, dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
import faiss
lowercase__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name='''vecs''', metric_type=faiss.METRIC_INNER_PRODUCT, )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCamelCase ) as tmp_file:
dset.save_faiss_index('''vecs''', tmp_file.name )
dset.load_faiss_index('''vecs2''', tmp_file.name )
os.unlink(tmp_file.name )
lowercase__ , lowercase__ = dset.get_nearest_examples('''vecs2''', np.ones(5, dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(lowerCamelCase, partial(dset.get_nearest_examples, '''vecs2''', np.ones(5, dtype=np.floataa ) ) )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
from elasticsearch import Elasticsearch
lowercase__ = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
lowercase__ = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
lowercase__ = Elasticsearch()
dset.add_elasticsearch_index('''filename''', es_client=lowerCamelCase )
lowercase__ , lowercase__ = dset.get_nearest_examples('''filename''', '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' )
@require_faiss
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
import faiss
lowercase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5, dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal, 5 )
index.add_vectors(np.zeros((5, 5), dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal, 10 )
# single query
lowercase__ = np.zeros(5, dtype=np.floataa )
lowercase__ = 1
lowercase__ , lowercase__ = index.search(lowerCamelCase )
self.assertRaises(lowerCamelCase, index.search, query.reshape(-1, 1 ) )
self.assertGreater(scores[0], 0 )
self.assertEqual(indices[0], 1 )
# batched queries
lowercase__ = np.eye(5, dtype=np.floataa )[::-1]
lowercase__ , lowercase__ = index.search_batch(lowerCamelCase )
self.assertRaises(lowerCamelCase, index.search_batch, queries[0] )
lowercase__ = [scores[0] for scores in total_scores]
lowercase__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCamelCase ), 0 )
self.assertListEqual([4, 3, 2, 1, 0], lowerCamelCase )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
import faiss
lowercase__ = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index, faiss.IndexFlat )
lowercase__ = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index, faiss.IndexLSH )
with self.assertRaises(lowerCamelCase ):
lowercase__ = FaissIndex(string_factory='''Flat''', custom_index=faiss.IndexFlat(5 ) )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
import faiss
lowercase__ = faiss.IndexFlat(5 )
lowercase__ = FaissIndex(custom_index=lowerCamelCase )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index, faiss.IndexFlat )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
import faiss
lowercase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCamelCase ) as tmp_file:
index.save(tmp_file.name )
lowercase__ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowercase__ = np.zeros(5, dtype=np.floataa )
lowercase__ = 1
lowercase__ , lowercase__ = index.search(lowerCamelCase )
self.assertGreater(scores[0], 0 )
self.assertEqual(indices[0], 1 )
@require_faiss
def a ( lowerCamelCase_ ):
'''simple docstring'''
import faiss
lowercase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowercase__ = '''index.faiss'''
lowercase__ = F"""mock://{index_name}"""
index.save(lowerCamelCase_ , storage_options=mockfs.storage_options )
lowercase__ = FaissIndex.load(lowerCamelCase_ , storage_options=mockfs.storage_options )
lowercase__ = np.zeros(5 , dtype=np.floataa )
lowercase__ = 1
lowercase__ , lowercase__ = index.search(lowerCamelCase_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : Dict ):
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
lowercase__ = Elasticsearch()
lowercase__ = {'''acknowledged''': True}
lowercase__ = ElasticSearchIndex(es_client=lowerCamelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
lowercase__ = '''foo'''
lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
lowercase__ , lowercase__ = index.search(lowerCamelCase )
self.assertEqual(scores[0], 1 )
self.assertEqual(indices[0], 0 )
# single query with timeout
lowercase__ = '''foo'''
lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
lowercase__ , lowercase__ = index.search(lowerCamelCase, request_timeout=30 )
self.assertEqual(scores[0], 1 )
self.assertEqual(indices[0], 0 )
# batched queries
lowercase__ = ['''foo''', '''bar''', '''foobar''']
lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
lowercase__ , lowercase__ = index.search_batch(lowerCamelCase )
lowercase__ = [scores[0] for scores in total_scores]
lowercase__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCamelCase ), 0 )
self.assertListEqual([1, 1, 1], lowerCamelCase )
# batched queries with timeout
lowercase__ = ['''foo''', '''bar''', '''foobar''']
lowercase__ = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
lowercase__ , lowercase__ = index.search_batch(lowerCamelCase, request_timeout=30 )
lowercase__ = [scores[0] for scores in total_scores]
lowercase__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCamelCase ), 0 )
self.assertListEqual([1, 1, 1], lowerCamelCase )
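# Illustrative sketch (not from the upstream test file): the tests above mock
# most of the service I/O, but the core FAISS pattern they exercise is simply
# "add vectors, then search". Assuming faiss-cpu is installed:
def _faiss_inner_product_demo():
    import faiss
    import numpy as np

    index = faiss.IndexFlatIP(5)                    # flat index, inner-product metric
    index.add(np.eye(5, dtype=np.float32))          # five one-hot vectors
    scores, ids = index.search(np.ones((1, 5), dtype=np.float32), 1)
    # every one-hot vector has inner product 1.0 with the all-ones query
    assert scores[0][0] == 1.0 and 0 <= ids[0][0] < 5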
| 671 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
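# Illustrative sketch (not part of the upstream pipeline): the denoising loop
# above runs the UNet on a doubled batch ([uncond; text]) and recombines the
# two noise predictions with classifier-free guidance,
#   eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)
# which in isolation is just:
import torch

def _classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred: (2 * batch, ...) with the unconditional half first
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)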
| 671 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
A__ : List[str] = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
A__ : Union[str, Any] = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
A__ : List[Any] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
A__ : Union[str, Any] = F"down_blocks.{i}.resnets.{j}."
A__ : str = F"input_blocks.{3*i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
A__ : Tuple = F"down_blocks.{i}.attentions.{j}."
A__ : List[Any] = F"input_blocks.{3*i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
A__ : Dict = F"up_blocks.{i}.resnets.{j}."
A__ : List[Any] = F"output_blocks.{3*i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
A__ : Optional[Any] = F"up_blocks.{i}.attentions.{j}."
A__ : Union[str, Any] = F"output_blocks.{3*i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
A__ : Optional[Any] = F"down_blocks.{i}.downsamplers.0.conv."
A__ : int = F"input_blocks.{3*(i+1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
A__ : Dict = F"up_blocks.{i}.upsamplers.0."
A__ : Any = F"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
A__ : str = 'mid_block.attentions.0.'
A__ : List[str] = 'middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
A__ : Optional[Any] = F"mid_block.resnets.{j}."
A__ : str = F"middle_block.{2*j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def a ( lowerCamelCase_ ):
'''simple docstring'''
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
lowercase__ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
lowercase__ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
lowercase__ = v.replace(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
lowercase__ = v.replace(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = v
lowercase__ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
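# Illustrative sketch (not part of the upstream script): the conversion above
# is pure key renaming; it builds an HF-name -> SD-name map by substring
# replacement, then re-keys the state dict. A toy run with a hypothetical key
# that matches the i=0, j=0 table entries:
def _rename_keys_demo():
    mapping = {"down_blocks.0.resnets.0.conv1.weight": "down_blocks.0.resnets.0.conv1.weight"}
    for hf_part, sd_part in [("conv1", "in_layers.2"), ("down_blocks.0.resnets.0.", "input_blocks.1.0.")]:
        mapping = {k: v.replace(hf_part, sd_part) for k, v in mapping.items()}
    assert mapping["down_blocks.0.resnets.0.conv1.weight"] == "input_blocks.1.0.in_layers.2.weight"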
# ================#
# VAE Conversion #
# ================#
A__ : Tuple = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
A__ : List[str] = F"encoder.down_blocks.{i}.resnets.{j}."
A__ : Dict = F"encoder.down.{i}.block.{j}."
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
A__ : int = F"down_blocks.{i}.downsamplers.0."
A__ : Optional[int] = F"down.{i}.downsample."
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
A__ : Any = F"up_blocks.{i}.upsamplers.0."
A__ : List[str] = F"up.{3-i}.upsample."
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
A__ : Union[str, Any] = F"decoder.up_blocks.{i}.resnets.{j}."
A__ : int = F"decoder.up.{3-i}.block.{j}."
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
A__ : List[str] = F"mid_block.resnets.{i}."
A__ : List[Any] = F"mid.block_{i+1}."
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
A__ : Union[str, Any] = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def a ( lowerCamelCase_ ):
'''simple docstring'''
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
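# Illustrative note (not part of the upstream script): SD's VAE attention uses
# 1x1 conv weights of shape (out, in, 1, 1) where the HF checkpoint stores
# plain linear weights of shape (out, in); the reshape above (upstream name
# reshape_weight_for_sd, here obfuscated to `a`) just appends the two singleton
# spatial dims, e.g.
#
#   a(torch.ones(512, 512)).shape   # -> torch.Size([512, 512, 1, 1])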
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
lowercase__ = v.replace(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
lowercase__ = v.replace(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = v
lowercase__ = {v: vae_state_dict[k] for k, v in mapping.items()}
lowercase__ = ['''q''', '''k''', '''v''', '''proj_out''']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"""mid.attn_1.{weight_name}.weight""" in k:
print(F"""Reshaping {k} for SD format""" )
lowercase__ = reshape_weight_for_sd(lowerCamelCase_ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
A__ : Any = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
A__ : Optional[int] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
A__ : Dict = re.compile('|'.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
A__ : Union[str, Any] = {'q': 0, 'k': 1, 'v': 2}
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = {}
lowercase__ = {}
lowercase__ = {}
for k, v in text_enc_dict.items():
if (
k.endswith('''.self_attn.q_proj.weight''' )
or k.endswith('''.self_attn.k_proj.weight''' )
or k.endswith('''.self_attn.v_proj.weight''' )
):
lowercase__ = k[: -len('''.q_proj.weight''' )]
lowercase__ = k[-len('''q_proj.weight''' )]
if k_pre not in capture_qkv_weight:
lowercase__ = [None, None, None]
lowercase__ = v
continue
if (
k.endswith('''.self_attn.q_proj.bias''' )
or k.endswith('''.self_attn.k_proj.bias''' )
or k.endswith('''.self_attn.v_proj.bias''' )
):
lowercase__ = k[: -len('''.q_proj.bias''' )]
lowercase__ = k[-len('''q_proj.bias''' )]
if k_pre not in capture_qkv_bias:
lowercase__ = [None, None, None]
lowercase__ = v
continue
lowercase__ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCamelCase_ )
lowercase__ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
lowercase__ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCamelCase_ )
lowercase__ = torch.cat(lowerCamelCase_ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
lowercase__ = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , lowerCamelCase_ )
lowercase__ = torch.cat(lowerCamelCase_ )
return new_state_dict
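# Illustrative note (not part of the upstream script): for the v2 (OpenCLIP)
# text encoder, HF stores separate q/k/v projections while the SD checkpoint
# fuses them into one in_proj tensor. The loop above captures the three weights
# per layer, ordered by {'q': 0, 'k': 1, 'v': 2}, and torch.cat's default of
# dim=0 stacks them into a (3*d, d) matrix:
#
#   in_proj_weight = torch.cat([w_q, w_k, w_v])   # shapes (d, d) each -> (3*d, d)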
def a ( lowerCamelCase_ ):
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
)
A__ : Union[str, Any] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
A__ : Any = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
A__ : Optional[int] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
A__ : str = osp.join(args.model_path, 'text_encoder', 'model.safetensors')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
A__ : str = load_file(unet_path, device='cpu')
else:
A__ : Optional[Any] = osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
A__ : List[Any] = torch.load(unet_path, map_location='cpu')
if osp.exists(vae_path):
A__ : Optional[Any] = load_file(vae_path, device='cpu')
else:
A__ : Union[str, Any] = osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
A__ : Dict = torch.load(vae_path, map_location='cpu')
if osp.exists(text_enc_path):
A__ : Any = load_file(text_enc_path, device='cpu')
else:
A__ : str = osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
A__ : List[str] = torch.load(text_enc_path, map_location='cpu')
# Convert the UNet model
A__ : Optional[Any] = convert_unet_state_dict(unet_state_dict)
A__ : Optional[Any] = {'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
A__ : str = convert_vae_state_dict(vae_state_dict)
A__ : Tuple = {'first_stage_model.' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
A__ : str = 'text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
A__ : Optional[int] = {'transformer.' + k: v for k, v in text_enc_dict.items()}
A__ : Any = convert_text_enc_state_dict_vaa(text_enc_dict)
A__ : Tuple = {'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
else:
A__ : Optional[Any] = convert_text_enc_state_dict(text_enc_dict)
A__ : Optional[Any] = {'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
A__ : int = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
A__ : Optional[int] = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
A__ : List[Any] = {'state_dict': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
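# Illustrative usage (not part of the upstream module; class and method names
# are obfuscated above, so `Graph`, `add_edge` and `get_shortest_path` below
# are hypothetical stand-ins for the second class and its two methods):
#
#   g = Graph(3)
#   g.add_edge(0, 1, 0)        # weight-0 edge: pushed to the front (appendleft)
#   g.add_edge(1, 2, 1)        # weight-1 edge: appended to the back
#   g.get_shortest_path(0, 2)  # -> 1
#
# Keeping weight-0 edges at the front means the deque never holds more than two
# distance levels, so this matches Dijkstra on {0,1} weights in O(V + E).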
| 671 | 1 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
A__ : str = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = test_results.split(''' ''' )
lowercase__ = 0
lowercase__ = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowercase__ = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
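# Illustrative note (not part of the upstream script): the parser above (here
# obfuscated to `a`) walks a pytest summary string and takes the integer token
# that precedes each "failed"/"passed", stripping the "== ... ==" decoration
# when present, e.g.
#
#   a("== 2 failed, 98 passed in 3.41s ==")   # -> (2, 98, "3.41s")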
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = {}
lowercase__ = None
lowercase__ = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
lowercase__ = True
lowercase__ = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
lowercase__ = line
lowercase__ = False
return failures
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple, lowerCamelCase : str, lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = title
lowercase__ = doc_test_results['''time_spent'''].split(''',''' )[0]
lowercase__ = doc_test_results['''success''']
lowercase__ = doc_test_results['''failures''']
lowercase__ = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowercase__ = doc_test_results
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = [self._time_spent]
lowercase__ = 0
for time in time_spent:
lowercase__ = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCamelCase ) == 1:
lowercase__ = [0, 0, time_parts[0]]
lowercase__ , lowercase__ , lowercase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
lowercase__ , lowercase__ , lowercase__ = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return F"""{int(lowerCamelCase )}h{int(lowerCamelCase )}m{int(lowerCamelCase )}s"""
@property
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = 40
lowercase__ = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(lowerCamelCase, lowerCamelCase )}
lowercase__ = ''''''
for category, failures in category_failures.items():
if len(lowerCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowerCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowerCamelCase )
@staticmethod
def lowercase__ ( ):
'''simple docstring'''
lowercase__ = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(lowerCamelCase )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text='''There was an issue running the tests.''', blocks=lowerCamelCase, )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
lowercase__ = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else '''All tests passed.'''
lowercase__ = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], blocks=self.payload, text=lowerCamelCase, )
def lowercase__ ( self : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Any, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = ''''''
for key, value in failures.items():
lowercase__ = value[:200] + ''' [Truncated]''' if len(lowerCamelCase ) > 250 else value
failures_text += F"""*{key}*\n_{value}_\n\n"""
lowercase__ = job_name
lowercase__ = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
lowercase__ = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
lowercase__ = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
lowercase__ = sorted(self.doc_test_results.items(), key=lambda lowerCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
lowercase__ = F"""*Num failures* :{len(job_result['failed'] )} \n"""
lowercase__ = job_result['''failures''']
lowercase__ = self.get_reply_blocks(lowerCamelCase, lowerCamelCase, lowerCamelCase, text=lowerCamelCase )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''], text=F"""Results for {job}""", blocks=lowerCamelCase, thread_ts=self.thread_ts['''ts'''], )
time.sleep(1 )
def a ( ):
'''simple docstring'''
lowercase__ = os.environ['''GITHUB_RUN_ID''']
lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
lowercase__ = requests.get(lowerCamelCase_ ).json()
lowercase__ = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
lowercase__ = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(lowerCamelCase_ ):
lowercase__ = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCamelCase_ )
return {}
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = {}
if os.path.exists(lowerCamelCase_ ):
lowercase__ = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}.""" ) from e
return _artifact
def a ( ):
'''simple docstring'''
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[str], lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = name
lowercase__ = []
def __str__( self : Union[str, Any] ):
'''simple docstring'''
return self.name
def lowercase__ ( self : Union[str, Any], lowerCamelCase : str ):
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
lowercase__ = {}
lowercase__ = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowercase__ = directory
if artifact_name not in _available_artifacts:
lowercase__ = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
A__ : Optional[int] = get_job_links()
A__ : Union[str, Any] = retrieve_available_artifacts()
A__ : int = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
A__ : Any = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
A__ : Union[str, Any] = github_actions_job_links.get('run_doctests')
A__ : Union[str, Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
A__ : Optional[int] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
A__ , A__ , A__ : Tuple = handle_test_results(artifact['stats'])
A__ : Optional[Any] = failed
A__ : Optional[int] = success
A__ : str = time_spent[1:-1] + ', '
A__ : Optional[int] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
A__ : int = line.replace('FAILED ', '')
A__ : List[str] = line.split()[0].replace('\n', '')
if "::" in line:
A__ , A__ : int = line.split('::')
else:
A__ , A__ : List[str] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
A__ : Any = docs[file_regex]
doc_test_results[category]["failed"].append(test)
A__ : Optional[int] = all_failures[test] if test in all_failures else 'N/A'
A__ : List[str] = failure
break
A__ : Union[str, Any] = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
# split the comma-separated input string into a list of number strings
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
print(('the result is:', re))
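# Illustrative note (not part of the upstream module): this is Kadane's
# maximum-subarray DP, where sum_value[i] is the best sum of a subarray ending
# at i and rear[i] the best sum seen anywhere up to i. For input "1,-2,3,4,-1":
#
#   sum_value = [1, -1, 3, 7, 6]    rear = [1, 1, 3, 7, 7]    -> 7  (subarray [3, 4])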
| 671 | 1 |
def a ( lowerCamelCase_ ):
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
lowercase__ = sum(lowerCamelCase_ ) / len(lowerCamelCase_ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
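# Illustrative worked example (not part of the upstream module): for
# nums = [1, 2, 3, 4] the mean is 2.5 and the absolute deviations are
# [1.5, 0.5, 0.5, 1.5], so the function above returns (1.5+0.5+0.5+1.5)/4 == 1.0.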
| 671 |
from itertools import count
def a ( lowerCamelCase_ = 50 ):
'''simple docstring'''
lowercase__ = [1] * min_block_length
for n in count(lowerCamelCase_ ):
fill_count_functions.append(1 )
for block_length in range(lowerCamelCase_ , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 100_0000:
break
return n
if __name__ == "__main__":
print(F"{solution() = }")
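# Illustrative note (not part of the upstream solution): Project Euler 115's
# F(m, n) counts ways to fill a row of length n with black squares and red
# blocks of length >= m, adjacent blocks separated by at least one square.
# The `append(1)` seeds the all-black row, the inner loops add the fill count
# of the suffix after each (block_length, block_start) placement, and the
# trailing `+= 1` counts the block placed flush against the end. Per the
# PE 115 statement F(3, 30) = 1_089_155, so solution(3) == 30.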
| 671 | 1 |
import re
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )]
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = split_input(str_ )
return "".join(
[''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
try:
lowercase__ = split_input(lowerCamelCase_ )
if upper:
lowercase__ = ''''''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
lowercase__ = ''''''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def a ( lowerCamelCase_ ):
'''simple docstring'''
return to_simple_case(lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
try:
lowercase__ = to_simple_case(lowerCamelCase_ )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
return to_complex_case(lowerCamelCase_ , lowerCamelCase_ , '''_''' )
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
return to_complex_case(lowerCamelCase_ , lowerCamelCase_ , '''-''' )
if __name__ == "__main__":
__import__('doctest').testmod()
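# Illustrative usage (not part of the upstream module; every function here is
# obfuscated to `a`, and in order of definition they are plausibly split_input,
# to_simple_case, to_complex_case, to_pascal_case, to_camel_case,
# to_snake_case and to_kebab_case). Expected results on "some random string":
#
#   Pascal: "SomeRandomString"
#   camel:  "someRandomString"
#   snake, upper=True:  "SOME_RANDOM_STRING"
#   kebab, upper=False: "some-random-string"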
| 671 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
    def _random_mel_fusion( self, mel, total_frames, chunk_frames ):
        '''simple docstring'''
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode='''bilinear''', align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
        return mel_fusion
    def _get_input_mel( self, waveform : np.array, max_length : int, truncation, padding ):
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0, overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 accounts for the final frame the spectrogram produces
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
        else:
            longer = False
            # "repeat" only affects padding: the audio is tiled before the usual max_length padding is applied
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform, n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform, n_repeat ) )
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self, raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation : str = None, padding : Optional[str] = None, max_length : Optional[int] = None, sampling_rate : Optional[int] = None, return_tensors : Optional[Union[str, TensorType]] = None, **kwargs, ):
        '''simple docstring'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding is not None else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float64 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List ):
            input_mel = [np.asarray(feature, dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
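# Usage sketch (illustrative; the upstream name of the class above is
# ClapFeatureExtractor, and the expected shapes are assumptions):
#   import numpy as np
#   from transformers import ClapFeatureExtractor
#   extractor = ClapFeatureExtractor()
#   audio = np.random.randn(12 * 48_000).astype("float32")  # 12 s > max_length_s=10
#   features = extractor(audio, sampling_rate=48_000, return_tensors="np")
#   features["input_features"].shape  # (1, 4, 1001, 64): 4 fused mel views
#   features["is_longer"]             # [[True]] -- the clip exceeded the max length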
| 671 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = KandinskyVaaControlnetImgaImgPipeline
lowercase__ = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
lowercase__ = ["""image_embeds""", """negative_image_embeds""", """image""", """hint"""]
lowercase__ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase__ = False
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return 32
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return 32
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def lowercase__ ( self : Dict ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return 100
@property
def lowercase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = {
'''in_channels''': 8,
            # out_channels is double in_channels because the model predicts both the mean and the variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase__ = UNetaDConditionModel(**lowerCamelCase )
return model
@property
def lowercase__ ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.dummy_unet
lowercase__ = self.dummy_movq
lowercase__ = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowercase__ = DDIMScheduler(**lowerCamelCase )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase__ ( self : Optional[int], lowerCamelCase : List[Any], lowerCamelCase : Optional[Any]=0 ):
'''simple docstring'''
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase )
# create init_image
lowercase__ = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
lowercase__ = image.cpu().permute(0, 2, 3, 1 )[0]
lowercase__ = Image.fromarray(np.uinta(lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create hint
lowercase__ = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if str(lowerCamelCase ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(lowerCamelCase )
else:
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
lowercase__ = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = pipe(**self.get_dummy_inputs(lowerCamelCase ) )
lowercase__ = output.images
lowercase__ = pipe(
**self.get_dummy_inputs(lowerCamelCase ), return_dict=lowerCamelCase, )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array(
[0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase__ = init_image.resize((512, 512) )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
lowercase__ = torch.from_numpy(np.array(lowerCamelCase ) ).float() / 255.0
lowercase__ = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowercase__ = '''A robot, 4k photo'''
lowercase__ = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase )
lowercase__ = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.floataa )
lowercase__ = pipeline.to(lowerCamelCase )
pipeline.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ , lowercase__ = pipe_prior(
lowerCamelCase, image=lowerCamelCase, strength=0.85, generator=lowerCamelCase, negative_prompt='''''', ).to_tuple()
lowercase__ = pipeline(
image=lowerCamelCase, image_embeds=lowerCamelCase, negative_image_embeds=lowerCamelCase, hint=lowerCamelCase, generator=lowerCamelCase, num_inference_steps=100, height=512, width=512, strength=0.5, output_type='''np''', )
lowercase__ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase )
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree():
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree


def preorder(root ):
    '''simple docstring'''
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []


def postorder(root ):
    '''simple docstring'''
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []


def inorder(root ):
    '''simple docstring'''
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []


def height(root ):
    '''simple docstring'''
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0


def level_order(root ):
    '''simple docstring'''
    output = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output


def get_nodes_from_left_to_right(root , level ):
    '''simple docstring'''
    output = []

    def populate_output(root , level ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(root , level )
    return output


def get_nodes_from_right_to_left(root , level ):
    '''simple docstring'''
    output = []

    def populate_output(root , level ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(root , level )
    return output


def zigzag(root ):
    '''simple docstring'''
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output


def main():  # Main function for testing.
    '''simple docstring'''
    root = make_tree()
    print(F"""In-order Traversal: {inorder(root )}""" )
    print(F"""Pre-order Traversal: {preorder(root )}""" )
    print(F"""Post-order Traversal: {postorder(root )}""" , '''\n''' )
    print(F"""Height of Tree: {height(root )}""" , '''\n''' )
    print('''Complete Level Order Traversal: ''' )
    print(level_order(root ) , '''\n''' )
    print('''Level-wise order Traversal: ''' )
    for level in range(1 , height(root ) + 1 ):
        print(F"""Level {level}:""" , get_nodes_from_left_to_right(root , level=level ) )
    print('''\nZigZag order Traversal: ''' )
    print(zigzag(root ) )
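# Note: `level_order` is O(n) thanks to the queue, while `zigzag` and the
# level-wise helpers are O(n * h) overall, since each level is collected by a
# fresh walk from the root.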
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f


def get_results(output_dir , split="eval" ):
    '''simple docstring'''
    path = os.path.join(output_dir , F"""{split}_results.json""" )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            return json.load(f )
    raise ValueError(F"""can't find {path}""" )


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
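# Each example script saves its metrics as `{split}_results.json` inside its
# output directory; `get_results` above reads that file back so the test
# methods below can assert on the reported scores.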
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : List[str] ):
'''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, '''argv''', testargs ):
            run_flax_glue.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_accuracy'''], 0.75 )
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, '''argv''', testargs ):
            run_clm_flax.main()
            result = get_results(tmp_dir )
self.assertLess(result['''eval_perplexity'''], 100 )
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, '''argv''', testargs ):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''], 10 )
self.assertGreaterEqual(result['''test_rouge2'''], 2 )
self.assertGreaterEqual(result['''test_rougeL'''], 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''], 7 )
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, '''argv''', testargs ):
            run_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertLess(result['''eval_perplexity'''], 42 )
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, '''argv''', testargs ):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_accuracy'''], 0.42 )
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, '''argv''', testargs ):
            run_flax_ner.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_accuracy'''], 0.75 )
self.assertGreaterEqual(result['''eval_f1'''], 0.3 )
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, '''argv''', testargs ):
            run_qa.main()
            result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_f1'''], 30 )
self.assertGreaterEqual(result['''eval_exact'''], 30 )
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''', add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[Any], lowerCamelCase : Optional[int], lowerCamelCase : List[str]=13, lowerCamelCase : Any=[30, 30], lowerCamelCase : Union[str, Any]=2, lowerCamelCase : Tuple=3, lowerCamelCase : int=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Tuple=32, lowerCamelCase : Tuple=5, lowerCamelCase : Union[str, Any]=4, lowerCamelCase : Optional[Any]=37, lowerCamelCase : List[str]="gelu", lowerCamelCase : int=0.1, lowerCamelCase : List[Any]=0.1, lowerCamelCase : Dict=10, lowerCamelCase : List[str]=0.02, lowerCamelCase : str=3, lowerCamelCase : List[Any]=None, lowerCamelCase : Tuple=8, lowerCamelCase : str=10, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = scope
lowercase__ = n_targets
lowercase__ = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ = num_patches + 1 + self.num_detection_tokens
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ = []
for i in range(self.batch_size ):
lowercase__ = {}
lowercase__ = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=lowerCamelCase )
lowercase__ = torch.rand(self.n_targets, 4, device=lowerCamelCase )
labels.append(lowerCamelCase )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : Any, lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = YolosModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Optional[int], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = YolosForObjectDetection(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(pixel_values=lowerCamelCase )
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ = model(pixel_values=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4) )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
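# Note: for object detection, `labels` is a list of per-image dicts whose keys
# upstream are "class_labels" (target classes) and "boxes" (normalized cxcywh
# coordinates), mirroring the DETR-style matching loss that YOLOS uses.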
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : List[str], lowerCamelCase : str, lowerCamelCase : int, lowerCamelCase : Optional[Any]=False ):
'''simple docstring'''
lowercase__ = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ = []
for i in range(self.model_tester.batch_size ):
lowercase__ = {}
lowercase__ = torch.ones(
size=(self.model_tester.n_targets,), device=lowerCamelCase, dtype=torch.long )
lowercase__ = torch.ones(
self.model_tester.n_targets, 4, device=lowerCamelCase, dtype=torch.float )
labels.append(lowerCamelCase )
lowercase__ = labels
return inputs_dict
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = YolosModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def lowercase__ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : str ):
'''simple docstring'''
# YOLOS does not use inputs_embeds
pass
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear ) )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
# in YOLOS, the seq_len is different
lowercase__ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ = True
lowercase__ = False
lowercase__ = True
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ = True
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
lowercase__ = len(lowerCamelCase )
# Check attention is always last and order is fine
lowercase__ = True
lowercase__ = True
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = 1
self.assertEqual(out_len + added_hidden_states, len(lowerCamelCase ) )
lowercase__ = outputs.attentions
self.assertEqual(len(lowerCamelCase ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Optional[Any], lowerCamelCase : str, lowerCamelCase : Optional[int] ):
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = getattr(
self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
# YOLOS has a different seq_length
lowercase__ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*lowerCamelCase )
@slow
def lowercase__ ( self : Tuple ):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = YolosModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : str ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(inputs.pixel_values )
# verify outputs
lowercase__ = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowercase__ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]], device=lowerCamelCase, )
lowercase__ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCamelCase, atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], lowerCamelCase, atol=1E-4 ) )
# verify postprocessing
lowercase__ = image_processor.post_process_object_detection(
lowerCamelCase, threshold=0.3, target_sizes=[image.size[::-1]] )[0]
lowercase__ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(lowerCamelCase )
lowercase__ = [75, 75, 17, 63, 17]
lowercase__ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(lowerCamelCase )
self.assertEqual(len(results['''scores'''] ), 5 )
self.assertTrue(torch.allclose(results['''scores'''], lowerCamelCase, atol=1E-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist(), lowerCamelCase )
self.assertTrue(torch.allclose(results['''boxes'''][0, :], lowerCamelCase ) )
| 671 |
from __future__ import annotations
def resistor_parallel(resistors ):
    '''simple docstring'''
    # Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum


def resistor_series(resistors ):
    '''simple docstring'''
    # Req = R1 + R2 + ... + Rn
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg )
        index += 1
    return sum_r
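# Usage sketch (illustrative):
#   resistor_parallel([3.21389, 2, 3])  # -> ~0.8737571620498019
#   resistor_series([3.21389, 2, 3])    # -> 8.21389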
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
def solution(limit=28_123 ):
    '''simple docstring'''
    # Sieve of the sums of proper divisors, then sum every integer that cannot
    # be written as the sum of two abundant numbers (Project Euler problem 23).
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
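# With the default limit of 28123 (the classical bound above which every
# integer is a sum of two abundant numbers), `solution()` should return
# 4179871, the answer to Project Euler problem 23.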
if __name__ == "__main__":
print(solution())
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size , device ):
    '''simple docstring'''
    img_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key(key ):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , key )
    if "blocks" in key:
        key = re.sub(r'''blocks''' , '''layers''' , key )
    if "attn" in key:
        key = re.sub(r'''attn''' , '''self_attn''' , key )
    if "norm1" in key:
        key = re.sub(r'''norm1''' , '''layer_norm1''' , key )
    if "norm2" in key:
        key = re.sub(r'''norm2''' , '''layer_norm2''' , key )
    if "encoder.norm" in key:
        key = re.sub(r'''encoder.norm''' , '''post_layernorm''' , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , key )
    if "encoder.pos_embed" in key:
        key = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , key )
    if "encoder.cls_token" in key:
        key = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , key )
    if "self_attn" in key:
        key = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , key )
    return key
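# Illustrative mapping produced by `rename_key` above:
#   "visual_encoder.blocks.0.attn.qkv.weight"
#       -> "vision_model.encoder.layers.0.self_attn.qkv.weight"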
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path , config_path=None ):
    '''simple docstring'''
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit='''base''' )
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict )
    image_size = 384
    image = load_demo_image(image_size=image_size , device='''cpu''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    input_ids = tokenizer(['''a picture of'''] ).input_ids
    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image )
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit='''base''' )
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )
    question = ['''How many dogs are in this image?''']
    question_input_ids = tokenizer(question , return_tensors='''pt''' ).input_ids
    answer = hf_vqa_model.generate(question_input_ids , image )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit='''base''' )
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config )
    question = ['''A picture of a woman with a dog sitting in a beach''']
    question_input_ids = tokenizer(
        question , return_tensors='''pt''' , padding='''max_length''' , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 671 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Dict = logging.get_logger(__name__)
A__ : Any = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = """roberta"""
    def __init__( self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
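# Usage sketch (illustrative; upstream the two classes above are named
# RobertaConfig and RobertaOnnxConfig):
#   from transformers import RobertaConfig, RobertaModel
#   config = RobertaConfig(num_hidden_layers=6)
#   model = RobertaModel(config)  # randomly initialised 6-layer encoder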
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
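    # The tests below exercise the same resize + center-crop path three times,
    # with PIL, NumPy and PyTorch inputs, for both unbatched and batched calls.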
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def assertListAlmostEqual( self, list1, list2, tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ), len(list2 ) )
        for a, b in zip(list1, list2 ):
            self.assertAlmostEqual(a, b, delta=tol )
def lowercase__ ( self : Any ):
'''simple docstring'''
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step, 3 )
self.assertEqual(len(accumulator.gradients ), 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step, 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1E-2 )
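    # The distributed test below replays the same accumulate/reset cycle under a
    # two-replica MirroredStrategy (one CPU split into two logical devices) and
    # checks that the per-replica gradient slots stay consistent.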
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('''CPU''' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='''CPU''' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer, _ = create_optimizer(5E-5, 10, 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False )

        def accumulate_on_replica(gradient ):
            accumulator([gradient] )

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable] ) ) )

        @tf.function
        def accumulate(grad1, grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,) )

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )

        def _check_local_values(grad1, grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1E-2 )
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1E-2 )
accumulate([1.0, 2.0], [-1.0, 1.0] )
accumulate([3.0, -1.0], [-1.0, -1.0] )
accumulate([-2.0, 2.0], [3.0, -2.0] )
self.assertEqual(accumulator.step, 3 )
_check_local_values([2.0, 3.0], [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step, 0 )
_check_local_values([0.0, 0.0], [0.0, 0.0] )
| 671 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
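# This script normalizes the `_import_structure` blocks of the library's
# `__init__.py` files: entries are sorted with constants first, then classes,
# then functions (underscores ignored), so that `make style` stays
# deterministic. Run with `--check_only` to report unsorted files instead of
# rewriting them.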
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
        return lowerCamelCase_
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowercase__ = sort_objects(lowerCamelCase_ , key=lambda x : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
        lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
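# Lazy import structure: `_import_structure` maps each submodule to its public
# names, and `_LazyModule` defers the heavy torch/TF imports until an attribute
# is first accessed. The `TYPE_CHECKING` branch below mirrors the structure
# with real imports so static type checkers can still resolve the symbols.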
A__ : str = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
A__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
lowercase__ = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowercase__ = [] # this list will be returns of the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
lowercase__ = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit variable. for break up the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
lowercase__ = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
lowercase__ = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
lowercase__ = 1 # this will be return
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
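# Illustrative expected values for the helpers above (a sketch only: it
# assumes the functions are bound to the names used at their call sites,
# e.g. `is_prime`, `prime_factorization`, `gcd`, `kg_v`, rather than run
# exactly as written):
#   is_prime(97)              -> True
#   prime_factorization(360)  -> [2, 2, 2, 3, 3, 5]
#   goldbach(28)              -> [5, 23]
#   gcd(24, 36)               -> 12
#   kg_v(8, 10)               -> 40   (least common multiple)
#   fib(10)                   -> 89   (this variant starts 1, 2, 3, 5, ...)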
| 671 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
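# Class-conditional image generation with a transformer-based diffusion model
# (DiT-style): class labels replace text prompts, and classifier-free guidance
# pairs each label with the "null" class id 1_000, blending the conditional
# and unconditional noise predictions according to `guidance_scale`.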
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : int, lowerCamelCase : TransformeraDModel, lowerCamelCase : AutoencoderKL, lowerCamelCase : KarrasDiffusionSchedulers, lowerCamelCase : Optional[Dict[int, str]] = None, ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=lowerCamelCase, vae=lowerCamelCase, scheduler=lowerCamelCase )
        # create an ImageNet label -> id dictionary for easier use
lowercase__ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
lowercase__ = int(lowerCamelCase )
lowercase__ = dict(sorted(self.labels.items() ) )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Union[str, List[str]] ):
'''simple docstring'''
if not isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = list(lowerCamelCase )
for l in label:
if l not in self.labels:
raise ValueError(
F"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Optional[Any], lowerCamelCase : List[int], lowerCamelCase : float = 4.0, lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None, lowerCamelCase : int = 50, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, ):
'''simple docstring'''
lowercase__ = len(lowerCamelCase )
lowercase__ = self.transformer.config.sample_size
lowercase__ = self.transformer.config.in_channels
lowercase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size), generator=lowerCamelCase, device=self.device, dtype=self.transformer.dtype, )
lowercase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ = torch.tensor(lowerCamelCase, device=self.device ).reshape(-1 )
lowercase__ = torch.tensor([1_000] * batch_size, device=self.device )
lowercase__ = torch.cat([class_labels, class_null], 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ = latent_model_input[: len(lowerCamelCase ) // 2]
lowercase__ = torch.cat([half, half], dim=0 )
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
lowercase__ = t
if not torch.is_tensor(lowerCamelCase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ = latent_model_input.device.type == '''mps'''
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = torch.floataa if is_mps else torch.floataa
else:
lowercase__ = torch.intaa if is_mps else torch.intaa
lowercase__ = torch.tensor([timesteps], dtype=lowerCamelCase, device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ = self.transformer(
lowerCamelCase, timestep=lowerCamelCase, class_labels=lowerCamelCase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__ , lowercase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__ , lowercase__ = torch.split(lowerCamelCase, len(lowerCamelCase ) // 2, dim=0 )
lowercase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ = torch.cat([half_eps, half_eps], dim=0 )
lowercase__ = torch.cat([eps, rest], dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__ , lowercase__ = torch.split(lowerCamelCase, lowerCamelCase, dim=1 )
else:
lowercase__ = noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase ).prev_sample
if guidance_scale > 1:
lowercase__ , lowercase__ = latent_model_input.chunk(2, dim=0 )
else:
lowercase__ = latent_model_input
lowercase__ = 1 / self.vae.config.scaling_factor * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (samples / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = samples.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowerCamelCase )
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
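# Illustrative invocation (script name, model and dataset identifiers are
# placeholders, not taken from this repository):
#   python eval.py --model_id some-org/wav2vec2-finetuned \
#       --dataset mozilla-foundation/common_voice_7_0 --config pt \
#       --split test --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs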
| 671 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
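# Tests for StableDiffusionPipelineSafe (safe latent diffusion). Passing
# `sld_guidance_scale=0` disables the safety guidance entirely, while large
# values (e.g. 2_000) combined with the warmup/threshold/momentum settings
# steer generations away from unsafe content. The expected slices are
# regression values recorded for fixed seeds.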
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Tuple ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = 1
lowercase__ = 3
lowercase__ = (32, 32)
lowercase__ = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(lowerCamelCase )
return image
@property
def lowercase__ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
return model
@property
def lowercase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
return model
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
return CLIPTextModel(lowerCamelCase )
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
def extract(*lowerCamelCase : Union[str, Any], **lowerCamelCase : Optional[Any] ):
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
lowercase__ = torch.ones([0] )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[str] ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase )
return self
return Out()
return extract
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.dummy_cond_unet
lowercase__ = DDIMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=lowerCamelCase, set_alpha_to_one=lowerCamelCase, )
lowercase__ = self.dummy_vae
lowercase__ = self.dummy_text_encoder
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # assemble the pipeline with the DDIM scheduler defined above
lowercase__ = StableDiffusionPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
lowercase__ = sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
lowercase__ = output.images
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=lowerCamelCase, )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.dummy_cond_unet
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = self.dummy_vae
lowercase__ = self.dummy_text_encoder
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
lowercase__ = StableDiffusionPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
lowercase__ = sd_pipe([prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
lowercase__ = output.images
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=lowerCamelCase, )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''', safety_checker=lowerCamelCase )
assert isinstance(lowerCamelCase, lowerCamelCase )
assert isinstance(pipe.scheduler, lowerCamelCase )
assert pipe.safety_checker is None
lowercase__ = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase )
lowercase__ = StableDiffusionPipeline.from_pretrained(lowerCamelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase__ = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.dummy_cond_unet
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase )
lowercase__ = self.dummy_vae
lowercase__ = self.dummy_text_encoder
lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
lowercase__ = unet.half()
lowercase__ = vae.half()
lowercase__ = bert.half()
# make sure here that pndm scheduler skips prk
lowercase__ = StableDiffusionPipeline(
unet=lowerCamelCase, scheduler=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, safety_checker=lowerCamelCase, feature_extractor=self.dummy_extractor, )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = sd_pipe([prompt], num_inference_steps=2, output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=lowerCamelCase )
lowercase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
lowercase__ = 4_003_660_346
lowercase__ = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=lowerCamelCase )
lowercase__ = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = '''padme amidala taking a bath artwork, safe for work, no nudity'''
lowercase__ = 2_734_971_755
lowercase__ = 7
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
lowercase__ = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
lowercase__ = 1_044_355_234
lowercase__ = 12
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=0, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase__ = torch.manual_seed(lowerCamelCase )
lowercase__ = sd_pipe(
[prompt], generator=lowerCamelCase, guidance_scale=lowerCamelCase, num_inference_steps=50, output_type='''np''', width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 671 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
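    # Project Euler 8: slide a window of 13 adjacent digits across the
    # 1000-digit string above and keep the largest digit product. `reduce`
    # multiplies the digits of each window pairwise, round-tripping through
    # str/int so the folding function stays well-typed over the string slice.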
return max(
# mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , lowerCamelCase_[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
A__ : Union[str, Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Tuple, *lowerCamelCase : List[Any], **lowerCamelCase : Tuple ):
'''simple docstring'''
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''', lowerCamelCase, )
super().__init__(*lowerCamelCase, **lowerCamelCase )
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
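# Variance-exploding (VE) SDE scheduler in the style of score-based generative
# models (see the score_sde_pytorch reference above). Sampling alternates a
# predictor step (a reverse-diffusion update over the discretized sigma
# schedule) with a corrector step (Langevin dynamics whose step size is
# derived from the configured signal-to-noise ratio `snr`).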
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of the SDE
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 671 | 1 |
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError('''String lengths must match!''' )
lowercase__ = 0
for chara, chara in zip(lowerCamelCase_ , lowerCamelCase_ ):
if chara != chara:
count += 1
return count
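# Illustrative behaviour (a sketch; the helper above is defined as `a`):
#   hamming distance of "karolin" and "kathrin" is 3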
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 |
from collections import defaultdict
from math import gcd
def a ( lowerCamelCase_ = 150_0000 ):
'''simple docstring'''
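    # Project Euler 75: count perimeters p <= limit formed by exactly one
    # integer right triangle. Primitive triples come from Euclid's formula
    # (m > n, gcd(m, n) == 1, m - n odd): a = m*m - n*n, b = 2*m*n,
    # c = m*m + n*n, giving perimeter 2*m*(m + n); non-primitive triples are
    # counted by stepping through the multiples of each primitive perimeter.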
    lowercase__ = defaultdict(int )
lowercase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) > 1:
continue
lowercase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
import os
def a ( ):
'''simple docstring'''
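    # Project Euler 13: sum the one hundred 50-digit numbers stored one per
    # line in `num.txt` (expected next to this script) and return the first
    # ten digits of the total.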
    lowercase__ = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
with open(lowerCamelCase_ ) as file_hand:
return str(sum(int(lowerCamelCase_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 671 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
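# The maps below point each supported checkpoint at its hosted vocab.txt and
# tokenizer.json, record the 512-token positional-embedding limit shared by
# all of them, and pin the matching `do_lower_case` default per model.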
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
    'bert-base-german-cased': 512,
    'bert-large-uncased-whole-word-masking': 512,
    'bert-large-cased-whole-word-masking': 512,
    'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
    'bert-large-cased-whole-word-masking-finetuned-squad': 512,
    'bert-base-cased-finetuned-mrpc': 512,
    'bert-base-german-dbmdz-cased': 512,
    'bert-base-german-dbmdz-uncased': 512,
    'TurkuNLP/bert-base-finnish-cased-v1': 512,
    'TurkuNLP/bert-base-finnish-uncased-v1': 512,
    'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Fast BERT tokenizer, backed by the HuggingFace `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the stored state disagrees with the
        # arguments passed in here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
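# Illustrative usage sketch (not part of the original module); assumes network
# access to the Hugging Face Hub to download the checkpoint:
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     encoded = tokenizer("Hello world!", "How are you?")
#     # token_type_ids mark the two segments exactly as built by
#     # create_token_type_ids_from_sequences above: 0s, then 1s.
#     print(encoded["input_ids"])
#     print(encoded["token_type_ids"])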
| 671 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7_600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor
    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def lowercase__ ( self : List[str] ):
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
lowercase__ = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
lowercase__ = feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values
lowercase__ = feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
# Test batched
lowercase__ = feat_extract(lowerCamelCase, return_tensors='''np''' ).input_values
lowercase__ = feat_extract(lowerCamelCase, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase, lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
lowercase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowercase__ = [None, 1_600, None]
for max_length, padding in zip(lowerCamelCase, lowerCamelCase ):
lowercase__ = feat_extract(lowerCamelCase, padding=lowerCamelCase, max_length=lowerCamelCase, return_tensors='''np''' )
lowercase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = range(800, 1_400, 200 )
lowercase__ = [floats_list((1, x) )[0] for x in lengths]
lowercase__ = ['''longest''', '''max_length''', '''do_not_pad''']
lowercase__ = [None, 1_600, None]
for max_length, padding in zip(lowerCamelCase, lowerCamelCase ):
lowercase__ = feat_extract(lowerCamelCase, max_length=lowerCamelCase, padding=lowerCamelCase )
lowercase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
lowercase__ = feat_extract(
lowerCamelCase, truncation=lowerCamelCase, max_length=1_000, padding='''max_length''', return_tensors='''np''' )
lowercase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowercase__ = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
lowercase__ = feat_extract(
lowerCamelCase, truncation=lowerCamelCase, max_length=1_000, padding='''longest''', return_tensors='''np''' )
lowercase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
lowercase__ = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
lowercase__ = feat_extract(
lowerCamelCase, truncation=lowerCamelCase, max_length=2_000, padding='''longest''', return_tensors='''np''' )
lowercase__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowercase__ = [floats_list((1, x) )[0] for x in range(800, 1_400, 200 )]
lowercase__ = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test feature size
lowercase__ = feature_extractor(audio_target=lowerCamelCase, padding=lowerCamelCase, return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
lowercase__ = feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_values
lowercase__ = feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
# Test batched
lowercase__ = feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_values
lowercase__ = feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase, lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowercase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__ = np.asarray(lowerCamelCase )
lowercase__ = feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_values
lowercase__ = feature_extractor(lowerCamelCase, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase, lowerCamelCase ):
self.assertTrue(np.allclose(lowerCamelCase, lowerCamelCase, atol=1E-3 ) )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase ) == len(lowerCamelCase ) for x, y in zip(lowerCamelCase, processed_features[input_name] ) ) )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase )
lowercase__ = BatchFeature({input_name: speech_inputs}, tensor_type='''np''' )
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCamelCase )
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict )
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs}, tensor_type='''pt''' )
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCamelCase )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ = [len(lowerCamelCase ) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
lowercase__ = feat_extract.num_mel_bins # hack!
lowercase__ = feat_extract.pad(lowerCamelCase, padding='''longest''', return_tensors='''np''' )
self.assertIn('''attention_mask''', lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCamelCase )
lowercase__ = self.feat_extract_tester.prepare_inputs_for_target()
lowercase__ = [len(lowerCamelCase ) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} )
lowercase__ = min(lowerCamelCase )
lowercase__ = feat_extract.num_mel_bins # hack!
lowercase__ = feat_extract.pad(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''np''' )
self.assertIn('''attention_mask''', lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93_680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 671 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
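# Illustrative only (not part of the original file): with the _LazyModule
# registration above, importing the package stays cheap, and the torch-backed
# classes are only materialized on first attribute access, e.g.:
#
#     from transformers import UniSpeechConfig   # lightweight, no torch needed
#     from transformers import UniSpeechModel    # triggers the modeling import (needs torch)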
| 671 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def lowercase__ ( self : Any ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase__ ( self : str ):
'''simple docstring'''
pass
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear ) )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
def lowercase__ ( self : Dict ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(lowerCamelCase ), BeitForMaskedImageModeling]:
continue
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
lowercase__ = model(**lowerCamelCase ).loss
loss.backward()
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ = False
lowercase__ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(lowerCamelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ = model_class(lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
lowercase__ = model(**lowerCamelCase ).loss
loss.backward()
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = _config_zero_init(lowerCamelCase )
for model_class in self.all_model_classes:
lowercase__ = model_class(config=lowerCamelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F"""Parameter {name} of model {model_class} seems not properly initialized""", )
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = BeitModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).pixel_values.to(lowerCamelCase )
# prepare bool_masked_pos
lowercase__ = torch.ones((1, 196), dtype=torch.bool ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(pixel_values=lowerCamelCase, bool_masked_pos=lowerCamelCase )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape, lowerCamelCase )
lowercase__ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], lowerCamelCase, atol=1E-2 ) )
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(logits.shape, lowerCamelCase )
lowercase__ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(logits[0, :3], lowerCamelCase, atol=1E-4 ) )
lowercase__ = 281
self.assertEqual(logits.argmax(-1 ).item(), lowerCamelCase )
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 21_841) )
self.assertEqual(logits.shape, lowerCamelCase )
lowercase__ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(logits[0, :3], lowerCamelCase, atol=1E-4 ) )
lowercase__ = 2_396
self.assertEqual(logits.argmax(-1 ).item(), lowerCamelCase )
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase__ = model.to(lowerCamelCase )
lowercase__ = BeitImageProcessor(do_resize=lowerCamelCase, size=640, do_center_crop=lowerCamelCase )
lowercase__ = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''' )
lowercase__ = Image.open(ds[0]['''file'''] )
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
lowercase__ = outputs.logits
# verify the logits
lowercase__ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape, lowerCamelCase )
lowercase__ = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
lowercase__ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
], device=lowerCamelCase, )
else:
lowercase__ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
], device=lowerCamelCase, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCamelCase, atol=1E-4 ) )
@slow
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase__ = model.to(lowerCamelCase )
lowercase__ = BeitImageProcessor(do_resize=lowerCamelCase, size=640, do_center_crop=lowerCamelCase )
lowercase__ = load_dataset('''hf-internal-testing/fixtures_ade20k''', split='''test''' )
lowercase__ = Image.open(ds[0]['''file'''] )
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
lowercase__ = outputs.logits.detach().cpu()
lowercase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase, target_sizes=[(500, 300)] )
lowercase__ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape, lowerCamelCase )
lowercase__ = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
lowercase__ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape, lowerCamelCase )
| 671 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset, **kwargs):  # intentionally shadows the builtin; the callers below expect it
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset, **kwargs):  # intentionally shadows the builtin; the callers below expect it
    _ = dataset.filter(**kwargs)
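# `get_duration` is imported from the local benchmark `utils` module. A minimal
# sketch of what it is assumed to do (a timing decorator returning elapsed seconds):
#
#     import functools
#     import time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             starttime = time.time()
#             func(*args, **kwargs)
#             return time.time() - starttime
#         return wrapper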
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times["map fast-tokenizer batched numpy"] = map(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 671 | 1 |
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
    '`from accelerate import find_executable_batch_size` to avoid this warning.',
    FutureWarning,
)
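# Illustrative migration example (not part of the original shim):
#
#     from accelerate import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # halves batch_size and retries whenever a CUDA OOM is raised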
| 671 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix
    def match(self, word: str):
        """Compute the common substring of the node's prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
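    # For example (illustrative): a node with prefix "myprefix" matched against
    # the word "mystring" returns ("my", "prefix", "string") -- the common part,
    # the leftover node prefix, and the leftover word.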
    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)
    def insert(self, word: str):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True
    def print_tree(self, height: int = 0):
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def pytests() -> None:
    assert test_trie()
def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
| 671 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
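# Illustrative only: once uploaded, the tiny checkpoint can be loaded like any
# other hub model (repo name taken from the comment near the top of this script):
#
#     tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")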
| 671 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

        image_processor_map = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Create a random uint8 image and return it as a PIL Image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_image_processor()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
lowercase__ = self.get_image_processor(do_normalize=lowerCamelCase, padding_value=1.0 )
lowercase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=lowerCamelCase, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = self.prepare_image_inputs()
lowercase__ = image_processor(lowerCamelCase, return_tensors='''np''' )
lowercase__ = processor(images=lowerCamelCase, return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = processor(text=lowerCamelCase )
lowercase__ = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = '''test'''
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ = processor.char_decode(lowerCamelCase )
lowercase__ = tokenizer.batch_decode(lowerCamelCase )
lowercase__ = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = None
lowercase__ = self.prepare_image_inputs()
lowercase__ = processor(text=lowerCamelCase, images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.get_image_processor()
lowercase__ = self.get_tokenizer()
lowercase__ = MgpstrProcessor(tokenizer=lowerCamelCase, image_processor=lowerCamelCase )
lowercase__ = torch.randn(1, 27, 38 )
lowercase__ = torch.randn(1, 27, 50_257 )
lowercase__ = torch.randn(1, 27, 30_522 )
lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 671 | 1 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution ( n = N ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
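# Sanity check (added; `solution` as restored above): the published answer for
# the greatest product of thirteen adjacent digits in this 1000-digit number.
assert solution() == 23_514_624_000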
| 671 |
def _modexpt ( base , exponent , modulo_value ):
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution ( base = 1777 , height = 1855 , digits = 8 ):
    '''simple docstring'''
    # iterate the tetration base^^height, keeping only the last `digits` digits
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(F"{solution() = }")
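# Illustrative check (added): the square-and-multiply helper above agrees with
# Python's built-in three-argument pow(), which also computes modular powers.
assert _modexpt(2, 10, 1_000) == pow(2, 10, 1_000) == 24
assert _modexpt(3, 7, 100) == pow(3, 7, 100)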
| 671 | 1 |
import math
def is_prime ( number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime ( value , factor=1 , **kwargs ):
    '''simple docstring'''
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
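# Usage sketch (added; names as restored above): next_prime walks upwards
# (or downwards with desc=True) from factor * value until is_prime succeeds.
assert is_prime(13) and not is_prime(21)
assert next_prime(14) == 17  # 15 and 16 are composite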
| 671 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
A__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : WhisperForConditionalGeneration, lowerCamelCase : WhisperProcessor, lowerCamelCase : AutoencoderKL, lowerCamelCase : CLIPTextModel, lowerCamelCase : CLIPTokenizer, lowerCamelCase : UNetaDConditionModel, lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], lowerCamelCase : StableDiffusionSafetyChecker, lowerCamelCase : CLIPImageProcessor, ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=lowerCamelCase, speech_processor=lowerCamelCase, vae=lowerCamelCase, text_encoder=lowerCamelCase, tokenizer=lowerCamelCase, unet=lowerCamelCase, scheduler=lowerCamelCase, feature_extractor=lowerCamelCase, )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Any, lowerCamelCase : Optional[Any], lowerCamelCase : Optional[Any]=16_000, lowerCamelCase : int = 512, lowerCamelCase : int = 512, lowerCamelCase : int = 50, lowerCamelCase : float = 7.5, lowerCamelCase : Optional[Union[str, List[str]]] = None, lowerCamelCase : Optional[int] = 1, lowerCamelCase : float = 0.0, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : Optional[torch.FloatTensor] = None, lowerCamelCase : Optional[str] = "pil", lowerCamelCase : bool = True, lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, lowerCamelCase : int = 1, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
lowercase__ = self.speech_processor.feature_extractor(
lowerCamelCase, return_tensors='''pt''', sampling_rate=lowerCamelCase ).input_features.to(self.device )
lowercase__ = self.speech_model.generate(lowerCamelCase, max_length=480_000 )
lowercase__ = self.speech_processor.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase, normalize=lowerCamelCase )[
0
]
if isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = 1
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = len(lowerCamelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase, lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt, lowerCamelCase, -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = [''''''] * batch_size
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
F""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase, lowerCamelCase ):
lowercase__ = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
lowerCamelCase, padding='''max_length''', max_length=lowerCamelCase, truncation=lowerCamelCase, return_tensors='''pt''', )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(1, lowerCamelCase, 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt, lowerCamelCase, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device='''cpu''', dtype=lowerCamelCase ).to(
self.device )
else:
lowercase__ = torch.randn(lowerCamelCase, generator=lowerCamelCase, device=self.device, dtype=lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(lowerCamelCase, lowerCamelCase )
# predict the noise residual
lowercase__ = self.unet(lowerCamelCase, lowerCamelCase, encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(lowerCamelCase ).sample
lowercase__ = (image / 2 + 0.5).clamp(0, 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCamelCase, nsfw_content_detected=lowerCamelCase )
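# Standalone sketch (added) of the classifier-free guidance arithmetic used in
# the denoising loop above: the final noise estimate moves away from the
# unconditional prediction toward the text-conditioned one by guidance_scale.
import torch as _torch_demo

_uncond = _torch_demo.zeros(1, 4)
_text = _torch_demo.ones(1, 4)
_guided = _uncond + 7.5 * (_text - _uncond)
assert _torch_demo.allclose(_guided, _torch_demo.full((1, 4), 7.5))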
| 671 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
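# Note (added): rename_key chains re.sub calls, so one state-dict key can be
# rewritten several times in sequence (blocks -> layers, attn -> self_attn,
# norm1 -> layer_norm1, ...), mapping the original BLIP ViT naming onto the
# Hugging Face module layout before load_state_dict is called.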
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = [[] for _ in range(lowerCamelCase )]
lowercase__ = size
def __getitem__( self : Optional[Any], lowerCamelCase : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self._size
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(lowerCamelCase, lowerCamelCase ) )
def lowercase__ ( self : Optional[int], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = deque([start_vertex] )
lowercase__ = [None] * self.size
lowercase__ = 0
while queue:
lowercase__ = queue.popleft()
lowercase__ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowercase__ = current_distance + edge.weight
lowercase__ = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase, lowerCamelCase )
and new_distance >= dest_vertex_distance
):
continue
lowercase__ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
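# Minimal standalone illustration (added) of the 0-1 BFS idea implemented
# above: weight-0 edges go to the front of the deque and weight-1 edges to the
# back, so vertices are processed in nondecreasing distance order.
from collections import deque as _deque_demo

_adj = {0: [(1, 0), (2, 1)], 1: [(2, 1)], 2: []}  # vertex -> [(neighbor, weight)]
_dist = {0: 0}
_queue = _deque_demo([0])
while _queue:
    _u = _queue.popleft()
    for _v, _w in _adj[_u]:
        _nd = _dist[_u] + _w
        if _nd < _dist.get(_v, float("inf")):
            _dist[_v] = _nd
            (_queue.appendleft if _w == 0 else _queue.append)(_v)
assert _dist == {0: 0, 1: 0, 2: 1}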
| 671 | 1 |
def check_cycle ( graph ):
    '''simple docstring'''
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search ( graph , vertex , visited , rec_stk ):
    '''simple docstring'''
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
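# Examples (added; using the names restored above): a directed 3-cycle is
# reported, a DAG is not.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False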
| 671 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
# we need a list not a string, so do something to change the type
lowercase__ = arr.split(''',''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = [int(self.array[0] )] * len(self.array )
lowercase__ = [int(self.array[0] )] * len(self.array )
for i in range(1, len(self.array ) ):
lowercase__ = max(
int(self.array[i] ) + sum_value[i - 1], int(self.array[i] ) )
lowercase__ = max(sum_value[i], rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
A__ : Dict = input('please input some numbers:')
A__ : Union[str, Any] = SubArray(whole_array)
A__ : int = array.solve_sub_array()
print('the result is:', re)
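# Standalone sketch (added) of the same recurrence (Kadane's algorithm) on a
# plain list of ints, without the comma-splitting wrapper above:
def _kadane_demo(nums):
    best = cur = nums[0]
    for x in nums[1:]:
        cur = max(cur + x, x)   # extend the running subarray or restart at x
        best = max(best, cur)   # best sum seen so far
    return best

assert _kadane_demo([1, -3, 4, -2, 3]) == 5  # the subarray [4, -2, 3]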
| 671 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
A__ : Optional[int] = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def a ( lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = True
while ask_again:
lowercase__ = input(lowerCamelCase_ )
try:
if default is not None and len(lowerCamelCase_ ) == 0:
return default
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_=[] , lowerCamelCase_=None , lowerCamelCase_=0 ):
'''simple docstring'''
lowercase__ = BulletMenu(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = menu.run(default_choice=lowerCamelCase_ )
return convert_value(lowerCamelCase_ ) if convert_value is not None else result
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(lowerCamelCase_ )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class _UpperCAmelCase ( argparse.RawDescriptionHelpFormatter ):
"""simple docstring"""
def lowercase__ ( self : str, lowerCamelCase : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Dict, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = super()._format_usage(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = usage.replace('''<command> [<args>] ''', '''''' )
return usage
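# Behaviour sketch (added) of the yes/no converter defined above: the answer is
# lower-cased and looked up in a two-entry dict, so anything other than
# "yes"/"no" raises KeyError and the prompting helper asks again.
_demo_yes_no = {"yes": True, "no": False}
assert _demo_yes_no["YES".lower()] is True
assert _demo_yes_no["no".lower()] is False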
| 671 |
from itertools import count
def solution ( min_block_length = 50 ):
    '''simple docstring'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
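# Worked check (added; `solution` as restored above): Project Euler 115 gives
# n = 30 as the least row length whose fill-count first exceeds one million
# when the minimum block length is 3.
assert solution(3) == 30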
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 1 |
def decimal_isolate ( number , digit_amount ):
    '''simple docstring'''
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 671 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
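# Standalone illustration (added, simplified) of the "repeatpad" branch above:
# a waveform shorter than max_length is tiled with full repeats, then
# zero-padded on the right up to the target length.
import numpy as _np_demo

_wave = _np_demo.ones(3, dtype=_np_demo.float32)
_max_length = 8
_n_repeat = int(_max_length / len(_wave))   # two full repeats fit
_tiled = _np_demo.tile(_wave, _n_repeat)    # length 6
_padded = _np_demo.pad(_tiled, (0, _max_length - _tiled.shape[0]), mode="constant", constant_values=0)
assert _padded.tolist() == [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]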
| 671 | 1 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
A__ : Dict = logging.getLogger(__name__)
def a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=lowerCamelCase_ , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=lowerCamelCase_ , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=lowerCamelCase_ , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=lowerCamelCase_ , default='''data/dump''' , help='''The dump file prefix.''' )
lowercase__ = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
lowercase__ = BertTokenizer.from_pretrained(args.tokenizer_name )
lowercase__ = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
lowercase__ = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowercase__ = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowercase__ = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
lowercase__ = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
lowercase__ = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowercase__ = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
lowercase__ = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
lowercase__ = fp.readlines()
logger.info('''Start encoding''' )
logger.info(F"""{len(lowerCamelCase_ )} examples to process.""" )
lowercase__ = []
lowercase__ = 0
lowercase__ = 1_0000
lowercase__ = time.time()
for text in data:
lowercase__ = F"""{bos} {text.strip()} {sep}"""
lowercase__ = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
rslt.append(lowerCamelCase_ )
iter += 1
if iter % interval == 0:
lowercase__ = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
lowercase__ = time.time()
logger.info('''Finished binarization''' )
logger.info(F"""{len(lowerCamelCase_ )} examples processed.""" )
lowercase__ = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
lowercase__ = tokenizer.vocab_size
if vocab_size < (1 << 16):
lowercase__ = [np.uintaa(lowerCamelCase_ ) for d in rslt]
else:
lowercase__ = [np.intaa(lowerCamelCase_ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(lowerCamelCase_ , '''wb''' ) as handle:
pickle.dump(rslt_ , lowerCamelCase_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
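# Note (added) on the dtype choice above: token ids fit in 16 bits whenever the
# vocabulary has fewer than 1 << 16 = 65_536 entries, halving the size of the
# pickled arrays compared to 32-bit ints.
import numpy as _np_dtype_demo
assert _np_dtype_demo.uint16(30_521) == 30_521  # a BERT-sized vocab fits
assert _np_dtype_demo.uint16(0).nbytes * 2 == _np_dtype_demo.int32(0).nbytes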
| 671 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
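# Standalone sketch (added) of the recursive in-order pattern used above, on
# plain (left, data, right) tuples so it runs without the Node class:
def _inorder_demo(tree):
    if tree is None:
        return []
    left, data, right = tree
    return [*_inorder_demo(left), data, *_inorder_demo(right)]

#   2
#  / \
# 1   3
assert _inorder_demo(((None, 1, None), 2, (None, 3, None))) == [1, 2, 3]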
| 671 | 1 |
import math
def perfect_square ( num ):
    '''simple docstring'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search ( n ):
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
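# Quick checks (added; names as restored above): both predicates agree, though
# the float version can misclassify very large integers because math.sqrt rounds.
assert perfect_square(9) and perfect_square_binary_search(9)
assert not perfect_square(8) and not perfect_square_binary_search(8)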
| 671 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Any ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDModel(
sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''AttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''AttnUpBlock2D'''), )
return model
@property
def lowercase__ ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''), cross_attention_dim=10, )
return model
@property
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D'''), up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D'''), )
lowercase__ = UNetaDModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=('''AttnDownBlock2D''', '''DownBlock2D'''), up_block_types=('''UpBlock2D''', '''AttnUpBlock2D'''), )
return vqvae, unet
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__ = Mel(
x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
lowercase__ = DDPMScheduler()
lowercase__ = AudioDiffusionPipeline(vqvae=lowerCamelCase, unet=self.dummy_unet, mel=lowerCamelCase, scheduler=lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(42 )
lowercase__ = pipe(generator=lowerCamelCase, steps=4 )
lowercase__ = output.audios[0]
lowercase__ = output.images[0]
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(42 )
lowercase__ = pipe(generator=lowerCamelCase, steps=4, return_dict=lowerCamelCase )
lowercase__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase__ = np.frombuffer(image.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.frombuffer(image_from_tuple.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
lowercase__ = DDIMScheduler()
lowercase__ = self.dummy_vqvae_and_unet
lowercase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=lowerCamelCase, scheduler=lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
np.random.seed(0 )
lowercase__ = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(42 )
lowercase__ = pipe(raw_audio=lowerCamelCase, generator=lowerCamelCase, start_step=5, steps=10 )
lowercase__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase__ = np.frombuffer(image.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase__ = self.dummy_unet_condition
lowercase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=lowerCamelCase, mel=lowerCamelCase, scheduler=lowerCamelCase )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
np.random.seed(0 )
lowercase__ = torch.rand((1, 1, 10) )
lowercase__ = pipe(generator=lowerCamelCase, encoding=lowerCamelCase )
lowercase__ = output.images[0]
lowercase__ = np.frombuffer(image.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = torch_device
lowercase__ = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
lowercase__ = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
lowercase__ = torch.Generator(device=lowerCamelCase ).manual_seed(42 )
lowercase__ = pipe(generator=lowerCamelCase )
lowercase__ = output.audios[0]
lowercase__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase__ = np.frombuffer(image.tobytes(), dtype='''uint8''' )[:10]
lowercase__ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 671 |
from __future__ import annotations
def resistor_parallel ( resistors ):
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F"""Resistor at index {index} has a negative or zero value!"""
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series ( resistors ):
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F"""Resistor at index {index} has a negative value!"""
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
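# Worked example (added; names as restored above): two 4-ohm resistors combine
# to 2 ohms in parallel and 8 ohms in series.
assert resistor_parallel([4, 4]) == 2.0
assert resistor_series([4, 4]) == 8.0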
| 671 | 1 |
def _modexpt ( base , exponent , modulo_value ):
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution ( base = 1777 , height = 1855 , digits = 8 ):
    '''simple docstring'''
    # iterate the tetration base^^height, keeping only the last `digits` digits
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(F"{solution() = }")
| 671 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the original BLIP checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 671 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
A__ : Any = False
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Optional[int], lowerCamelCase : List[Any]=32 ):
'''simple docstring'''
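        # build a small UNet and SGD optimizer; seeding makes both scheduler runs start from identical weights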
set_seed(0 )
lowercase__ = UNetaDModel(sample_size=lowerCamelCase, in_channels=3, out_channels=3 )
lowercase__ = torch.optim.SGD(model.parameters(), lr=0.0001 )
return model, optimizer
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowercase__ = DDPMScheduler(
num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule='''linear''', clip_sample=lowerCamelCase, )
lowercase__ = DDIMScheduler(
num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule='''linear''', clip_sample=lowerCamelCase, )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowercase__ = [torch.randn((4, 3, 32, 32) ).clip(-1, 1 ).to(lowerCamelCase ) for _ in range(4 )]
lowercase__ = [torch.randn((4, 3, 32, 32) ).to(lowerCamelCase ) for _ in range(4 )]
lowercase__ = [torch.randint(0, 1_000, (4,) ).long().to(lowerCamelCase ) for _ in range(4 )]
# train with a DDPM scheduler
lowercase__ , lowercase__ = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase__ = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i] )
lowercase__ = model(lowerCamelCase, timesteps[i] ).sample
lowercase__ = torch.nn.functional.mse_loss(lowerCamelCase, noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowercase__ , lowercase__ = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase )
for i in range(4 ):
optimizer.zero_grad()
lowercase__ = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i] )
lowercase__ = model(lowerCamelCase, timesteps[i] ).sample
lowercase__ = torch.nn.functional.mse_loss(lowerCamelCase, noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-5 ) )
self.assertTrue(torch.allclose(lowerCamelCase, lowerCamelCase, atol=1E-5 ) )
| 671 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 1 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def a ( lowerCamelCase_ , lowerCamelCase_="shi-labs/oneformer_demo" ):
'''simple docstring'''
with open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''' ) , '''r''' ) as f:
lowercase__ = json.load(lowerCamelCase_ )
lowercase__ = {}
lowercase__ = []
lowercase__ = []
for key, info in class_info.items():
lowercase__ = info['''name''']
class_names.append(info['''name'''] )
if info["isthing"]:
thing_ids.append(int(lowerCamelCase_ ) )
lowercase__ = thing_ids
lowercase__ = class_names
return metadata
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str], lowerCamelCase : Tuple, lowerCamelCase : str=7, lowerCamelCase : int=3, lowerCamelCase : List[str]=30, lowerCamelCase : Union[str, Any]=400, lowerCamelCase : int=None, lowerCamelCase : Union[str, Any]=True, lowerCamelCase : Dict=True, lowerCamelCase : int=[0.5, 0.5, 0.5], lowerCamelCase : Dict=[0.5, 0.5, 0.5], lowerCamelCase : List[str]=10, lowerCamelCase : Tuple=False, lowerCamelCase : Dict=255, lowerCamelCase : Any="shi-labs/oneformer_demo", lowerCamelCase : Tuple="ade20k_panoptic.json", lowerCamelCase : List[Any]=10, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = {'''shortest_edge''': 32, '''longest_edge''': 1_333} if size is None else size
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
lowercase__ = class_info_file
lowercase__ = prepare_metadata(lowerCamelCase, lowerCamelCase )
lowercase__ = num_text
lowercase__ = repo_path
# for the post_process_functions
lowercase__ = 2
lowercase__ = 10
lowercase__ = 10
lowercase__ = 3
lowercase__ = 4
lowercase__ = num_labels
lowercase__ = do_reduce_labels
lowercase__ = ignore_index
def lowercase__ ( self : int ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowercase__ ( self : List[Any], lowerCamelCase : Optional[Any], lowerCamelCase : str=False ):
'''simple docstring'''
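        # mirror the processor's shortest-edge resize to compute the expected output size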
if not batched:
lowercase__ = image_inputs[0]
if isinstance(lowerCamelCase, Image.Image ):
lowercase__ , lowercase__ = image.size
else:
lowercase__ , lowercase__ = image.shape[1], image.shape[2]
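            # scale so the shorter side matches size["shortest_edge"], keeping the aspect ratio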
if w < h:
lowercase__ = int(self.size['''shortest_edge'''] * h / w )
lowercase__ = self.size['''shortest_edge''']
elif w > h:
lowercase__ = self.size['''shortest_edge''']
lowercase__ = int(self.size['''shortest_edge'''] * w / h )
else:
lowercase__ = self.size['''shortest_edge''']
lowercase__ = self.size['''shortest_edge''']
else:
lowercase__ = []
for image in image_inputs:
lowercase__ , lowercase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            lowercase__ = max(lowerCamelCase, key=lambda lowerCamelCase : lowerCamelCase[0] )[0]
            lowercase__ = max(lowerCamelCase, key=lambda lowerCamelCase : lowerCamelCase[1] )[1]
return expected_height, expected_width
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ), )
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
lowercase__ = image_processing_class
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = OneFormerImageProcessorTester(self )
@property
def lowercase__ ( self : str ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''image_std''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''ignore_index''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''class_info_file''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''num_text''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''repo_path''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''metadata''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_reduce_labels''' ) )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
pass
def lowercase__ ( self : Tuple ):
'''simple docstring'''
# Initialize image_processor
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processing_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values
lowercase__ , lowercase__ = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
lowercase__ , lowercase__ = self.image_processing_tester.get_expected_values(lowerCamelCase, batched=lowerCamelCase )
lowercase__ = image_processor(
lowerCamelCase, ['''semantic'''] * len(lowerCamelCase ), return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
# Initialize image_processor
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processing_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values
lowercase__ , lowercase__ = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
lowercase__ , lowercase__ = self.image_processing_tester.get_expected_values(lowerCamelCase, batched=lowerCamelCase )
lowercase__ = image_processor(
lowerCamelCase, ['''semantic'''] * len(lowerCamelCase ), return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
# Initialize image_processor
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processing_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values
lowercase__ , lowercase__ = self.image_processing_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
lowercase__ , lowercase__ = self.image_processing_tester.get_expected_values(lowerCamelCase, batched=lowerCamelCase )
lowercase__ = image_processor(
lowerCamelCase, ['''semantic'''] * len(lowerCamelCase ), return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def lowercase__ ( self : Optional[Any], lowerCamelCase : List[str]=False, lowerCamelCase : Any=False, lowerCamelCase : int="np" ):
'''simple docstring'''
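        # build image inputs plus optional instance/semantic annotations to feed the processor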
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
lowercase__ = self.image_processing_tester.num_labels
lowercase__ = None
lowercase__ = None
lowercase__ = prepare_image_inputs(self.image_processing_tester, equal_resolution=lowerCamelCase )
if with_segmentation_maps:
lowercase__ = num_labels
if is_instance_map:
lowercase__ = list(range(lowerCamelCase ) ) * 2
lowercase__ = dict(enumerate(lowerCamelCase ) )
lowercase__ = [
np.random.randint(0, high * 2, (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
lowercase__ = [Image.fromarray(lowerCamelCase ) for annotation in annotations]
lowercase__ = image_processor(
lowerCamelCase, ['''semantic'''] * len(lowerCamelCase ), lowerCamelCase, return_tensors='''pt''', instance_id_to_semantic_id=lowerCamelCase, pad_and_return_pixel_mask=lowerCamelCase, )
return inputs
def lowercase__ ( self : List[str] ):
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
def common(lowerCamelCase : Optional[Any]=False, lowerCamelCase : Tuple=None ):
lowercase__ = self.comm_get_image_processor_inputs(
with_segmentation_maps=lowerCamelCase, is_instance_map=lowerCamelCase, segmentation_type=lowerCamelCase )
lowercase__ = inputs['''mask_labels''']
lowercase__ = inputs['''class_labels''']
lowercase__ = inputs['''pixel_values''']
lowercase__ = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(lowerCamelCase, lowerCamelCase, lowerCamelCase ):
self.assertEqual(mask_label.shape[0], class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:] )
self.assertEqual(len(lowerCamelCase ), self.image_processing_tester.num_text )
common()
common(is_instance_map=lowerCamelCase )
common(is_instance_map=lowerCamelCase, segmentation_type='''pil''' )
common(is_instance_map=lowerCamelCase, segmentation_type='''pil''' )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = np.zeros((20, 50) )
lowercase__ = 1
lowercase__ = 1
lowercase__ = 1
lowercase__ = binary_mask_to_rle(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ), 4 )
self.assertEqual(rle[0], 21 )
self.assertEqual(rle[1], 45 )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', )
lowercase__ = self.image_processing_tester.get_fake_oneformer_outputs()
        lowercase__ = feature_extractor.post_process_semantic_segmentation(lowerCamelCase )
self.assertEqual(len(lowerCamelCase ), self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape, (
self.image_processing_tester.height,
self.image_processing_tester.width,
), )
lowercase__ = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        lowercase__ = feature_extractor.post_process_semantic_segmentation(lowerCamelCase, target_sizes=lowerCamelCase )
self.assertEqual(segmentation[0].shape, target_sizes[0] )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', )
lowercase__ = self.image_processing_tester.get_fake_oneformer_outputs()
lowercase__ = image_processor.post_process_instance_segmentation(lowerCamelCase, threshold=0 )
self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ), lowerCamelCase )
self.assertEqual(
el['''segmentation'''].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', )
lowercase__ = self.image_processing_tester.get_fake_oneformer_outputs()
lowercase__ = image_processor.post_process_panoptic_segmentation(lowerCamelCase, threshold=0 )
self.assertTrue(len(lowerCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ), lowerCamelCase )
self.assertEqual(
el['''segmentation'''].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
| 671 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
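    # split the code into blocks, cutting whenever a line comes back to the reference indentation level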
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
        return lowerCamelCase_
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : lowerCamelCase_[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
        lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : lowerCamelCase_[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
A__ : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
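        # split the valid start positions into three ranges (front/middle/back) and sample one chunk from each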
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
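        # downsample the full spectrogram to the chunk length and stack it with the three random crops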
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
| 671 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
lowercase__ = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowercase__ = [] # this list will be returns of the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
lowercase__ = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit variable. for break up the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
lowercase__ = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
lowercase__ = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
lowercase__ = 1 # this will be return
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
| 671 | 1 |
import os
import numpy
import onnx
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = a.name
lowercase__ = b.name
lowercase__ = ''''''
lowercase__ = ''''''
lowercase__ = a == b
lowercase__ = name_a
lowercase__ = name_b
return res
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase_ , lowerCamelCase_ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase_ , lowerCamelCase_ )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase_ , lowerCamelCase_ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = list(model.graph.initializer )
lowercase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase__ = inits[i].name
lowercase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase_ , lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
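    # scan all graph initializers, detect byte-identical duplicates, and rewrite references to keep a single copy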
lowercase__ = os.path.dirname(lowerCamelCase_ )
lowercase__ = os.path.basename(lowerCamelCase_ )
lowercase__ = onnx.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = list(model.graph.initializer )
lowercase__ = set()
lowercase__ = {}
lowercase__ = []
lowercase__ = 0
for i in range(len(lowerCamelCase_ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCamelCase_ )
dup_set.add(lowerCamelCase_ )
lowercase__ = inits[j].data_type
lowercase__ = numpy.prod(inits[j].dims )
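                    # ONNX TensorProto dtypes: 1=FLOAT and 6=INT32 take 4 bytes; 7=INT64 and 11=DOUBLE take 8 bytes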
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('''unexpected data type: ''' , lowerCamelCase_ )
total_reduced_size += mem_size
lowercase__ = inits[i].name
lowercase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase_ )
else:
lowercase__ = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
lowercase__ = sorted(lowerCamelCase_ )
_remove_dup_initializers_from_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = '''optimized_''' + model_file_name
lowercase__ = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
onnx.save(lowerCamelCase_ , lowerCamelCase_ )
return new_model
| 671 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ : List[str] = '▁'
A__ : Tuple = {'vocab_file': 'spiece.model'}
A__ : Dict = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
A__ : List[Any] = {
'google/pegasus-xsum': 5_12,
}
A__ : int = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict, lowerCamelCase : Any, lowerCamelCase : List[str]="<pad>", lowerCamelCase : Optional[Any]="</s>", lowerCamelCase : int="<unk>", lowerCamelCase : Optional[int]="<mask_2>", lowerCamelCase : Tuple="<mask_1>", lowerCamelCase : Any=None, lowerCamelCase : Optional[int]=103, lowerCamelCase : Optional[Dict[str, Any]] = None, **lowerCamelCase : Tuple, ):
'''simple docstring'''
lowercase__ = offset
if additional_special_tokens is not None:
if not isinstance(lowerCamelCase, lowerCamelCase ):
raise TypeError(
F"""additional_special_tokens should be of type {type(lowerCamelCase )}, but is"""
F""" {type(lowerCamelCase )}""" )
lowercase__ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(lowerCamelCase ), self.offset - 1 )
]
if len(set(lowerCamelCase ) ) != len(lowerCamelCase ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
lowercase__ = additional_special_tokens_extended
else:
lowercase__ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2, self.offset )]
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCamelCase, unk_token=lowerCamelCase, mask_token=lowerCamelCase, pad_token=lowerCamelCase, mask_token_sent=lowerCamelCase, offset=lowerCamelCase, additional_special_tokens=lowerCamelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase, )
lowercase__ = mask_token_sent
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCamelCase )
# add special tokens to encoder dict
lowercase__ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1 )} )
lowercase__ = {v: k for k, v in self.encoder.items()}
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + self.offset
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : List[Any], lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase, out_type=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : str ):
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
lowercase__ = self.sp_model.piece_to_id(lowerCamelCase )
return sp_id + self.offset
def lowercase__ ( self : Any, lowerCamelCase : int ):
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
lowercase__ = self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase__ ( self : int, lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = []
lowercase__ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase ) + token
lowercase__ = []
else:
current_sub_tokens.append(lowerCamelCase )
out_string += self.sp_model.decode(lowerCamelCase )
return out_string.strip()
def lowercase__ ( self : str, lowerCamelCase : Dict=False ):
'''simple docstring'''
return 1
def lowercase__ ( self : Tuple, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase__ ( self : Optional[Any], lowerCamelCase : List, lowerCamelCase : Optional[List] = None, lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowerCamelCase )
elif token_ids_a is None:
return self._special_token_mask(lowerCamelCase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase__ ( self : Optional[int], lowerCamelCase : Optional[int], lowerCamelCase : Dict=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase, '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
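# Worked note (an illustrative addition, not part of the original file) on the
# id offset handled above: with the default offset of 103, ids 0..102 are
# reserved for the pad/eos/mask/<unk_i> slots, so sentencepiece piece id p
# surfaces as tokenizer id p + 103, and decoding inverts this via
# IdToPiece(index - 103).
_offset = 103
_piece_id = 7
_token_id = _piece_id + _offset  # 110
assert _token_id - _offset == _piece_id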
| 671 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
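# Minimal sketch (an addition; `window_product` and `solution_loop` are
# hypothetical names): the same search written as an explicit sliding window
# instead of `reduce`. Pass the digit string bound to A__ above for the full
# Project Euler input.
def window_product(digits: str) -> int:
    # Multiply all digits of the window together.
    product = 1
    for ch in digits:
        product *= int(ch)
    return product

def solution_loop(n: str, width: int = 13) -> int:
    # Scan every window of `width` adjacent digits and keep the best product.
    return max(window_product(n[i : i + width]) for i in range(len(n) - width + 1))

assert solution_loop("123456789", width=3) == 504  # 7 * 8 * 9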
| 671 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''file.csv'''
lowercase__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''malformed_file.csv'''
lowercase__ = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''csv_with_image.csv'''
lowercase__ = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''csv_with_label.csv'''
lowercase__ = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
@pytest.fixture
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = tmp_path / '''csv_with_int_list.csv'''
lowercase__ = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write(lowerCamelCase_ )
return str(lowerCamelCase_ )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = Csv()
lowercase__ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(lowerCamelCase_ , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(lowerCamelCase_ ) in record.message
for record in caplog.records )
@require_pil
def a ( lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read().splitlines()[1]
lowercase__ = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
lowercase__ = csv._generate_tables([[csv_file_with_image]] )
lowercase__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
lowercase__ = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def a ( lowerCamelCase_ ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read().splitlines()[1:]
lowercase__ = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
lowercase__ = csv._generate_tables([[csv_file_with_label]] )
lowercase__ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
lowercase__ = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(label ) for label in labels]
def a ( lowerCamelCase_ ):
'''simple docstring'''
    lowercase__ = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda lowerCamelCase_ : [int(i) for i in lowerCamelCase_.split()]} )
lowercase__ = csv._generate_tables([[csv_file_with_int_list]] )
lowercase__ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
lowercase__ = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
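# Standalone illustration (an addition) of the converter used in the int-list
# test above: pandas applies it per CSV cell, turning a space-separated
# string into a list of ints.
_to_int_list = lambda cell: [int(i) for i in cell.split()]
assert _to_int_list("1 2 3") == [1, 2, 3]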
| 671 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
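# Standalone sketch (an illustrative addition): the final sigma schedule set
# in set_sigmas above is geometric in t, sigma(t) = sigma_min * (sigma_max /
# sigma_min) ** t, i.e. log-sigma interpolates linearly between
# log(sigma_min) and log(sigma_max).
def _geometric_sigmas(sigma_min: float, sigma_max: float, timesteps: torch.Tensor) -> torch.Tensor:
    return sigma_min * (sigma_max / sigma_min) ** timesteps

# e.g. _geometric_sigmas(0.01, 1348.0, torch.linspace(1.0, 1e-5, 5))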
| 671 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 |
from collections import defaultdict
from math import gcd
def a ( lowerCamelCase_ = 150_0000 ):
'''simple docstring'''
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , lowerCamelCase_ , 2 ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) > 1:
continue
lowercase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
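# Illustrative check (an addition): Euclid's formula a = m**2 - n**2,
# b = 2*m*n, c = m**2 + n**2 yields a right triangle with perimeter
# 2*m*(m + n), which is exactly the quantity stepped over above.
def _euclid_triple(m: int, n: int) -> tuple[int, int, int]:
    return (m * m - n * n, 2 * m * n, m * m + n * n)

_a, _b, _c = _euclid_triple(2, 1)  # the (3, 4, 5) triangle
assert _a * _a + _b * _b == _c * _c
assert _a + _b + _c == 2 * 2 * (2 + 1)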
| 671 | 1 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0
for i in range(1 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) ):
if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE_ ):
total += i + n // i
elif i == sqrt(SCREAMING_SNAKE_CASE_ ):
total += i
return total - n
def a ( lowerCamelCase_ = 1_0000 ):
'''simple docstring'''
lowercase__ = sum(
i
for i in range(1 , SCREAMING_SNAKE_CASE_ )
if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE_ ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
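# Illustrative check (an addition): 220 and 284 form the classic amicable
# pair, i.e. each equals the sum of the other's proper divisors, so both
# survive the filter in the solution above.
def _proper_divisor_sum(n: int) -> int:
    return sum(i for i in range(1, n) if n % i == 0)

assert _proper_divisor_sum(220) == 284
assert _proper_divisor_sum(284) == 220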
| 700 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
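# Worked illustration (an addition) of create_token_type_ids_from_sequences
# above: positions covering [CLS] A [SEP] receive type 0 and B [SEP]
# receives type 1.
_a_ids, _b_ids = [7, 8, 9], [11, 12]
_type_ids = [0] * (1 + len(_a_ids) + 1) + [1] * (len(_b_ids) + 1)
assert _type_ids == [0, 0, 0, 0, 0, 1, 1, 1]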
| 671 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Optional[int] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : List[str] = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
A__ : int = {
'yjernite/retribert-base-uncased': 5_12,
}
A__ : Tuple = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _UpperCAmelCase ( _snake_case ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = RetriBertTokenizer
lowercase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple, lowerCamelCase : List[str]=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Union[str, Any]="[UNK]", lowerCamelCase : Union[str, Any]="[SEP]", lowerCamelCase : Dict="[PAD]", lowerCamelCase : Any="[CLS]", lowerCamelCase : List[str]="[MASK]", lowerCamelCase : Dict=True, lowerCamelCase : List[str]=None, **lowerCamelCase : List[Any], ):
'''simple docstring'''
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, do_lower_case=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenize_chinese_chars=lowerCAmelCase__, strip_accents=lowerCAmelCase__, **lowerCAmelCase__, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCAmelCase__, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCAmelCase__ )
lowercase__ = do_lower_case
def lowercase__ ( self : List[Any], lowerCamelCase : Any, lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Dict, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A__ : Optional[Any] = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
A__ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 702 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
A__ : Dict = 50_00_00
A__ , A__ : str = os.path.split(__file__)
A__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.map(**lowerCamelCase_ )
@get_duration
def a ( lowerCamelCase_ , **lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = dataset.filter(**lowerCamelCase_ )
def a ( ):
'''simple docstring'''
lowercase__ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase__ = generate_example_dataset(
os.path.join(lowerCamelCase_ , '''dataset.arrow''' ) , lowerCamelCase_ , num_examples=lowerCamelCase_ )
lowercase__ = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowerCamelCase_ )
def tokenize(lowerCamelCase_ ):
return tokenizer(examples['''text'''] )
lowercase__ = map(lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''numpy''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''pandas''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
lowercase__ = map(lowerCamelCase_ , function=lambda lowerCamelCase_ : None , batched=lowerCamelCase_ )
lowercase__ = map(lowerCamelCase_ , function=lowerCamelCase_ , batched=lowerCamelCase_ )
lowercase__ = filter(lowerCamelCase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowerCamelCase_ , '''wb''' ) as f:
f.write(json.dumps(lowerCamelCase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
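# Hypothetical sketch (an assumption, not the repo's actual
# `utils.get_duration`): a timing decorator of this shape is enough to
# reproduce the seconds-per-call numbers the benchmark records.
import functools
import time

def _get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed wall-clock seconds
    return wrapper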
| 671 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ : Tuple = logging.get_logger(__name__)
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = original_name.split('''.''' )[0]
lowercase__ = key.split('''.''' )
lowercase__ = int(key_list[key_list.index(lowerCamelCase_ ) - 2] )
lowercase__ = int(key_list[key_list.index(lowerCamelCase_ ) - 1] )
lowercase__ = orig_block_num - offset
lowercase__ = key.replace(F"""{orig_block_num}.{layer_num}.{original_name}""" , F"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = OrderedDict()
lowercase__ , lowercase__ = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
lowercase__ = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
lowercase__ = key[: key.find('''proj''' )]
lowercase__ = key.replace(lowerCamelCase_ , F"""patch_embeddings.{total_embed_found}.""" )
lowercase__ = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
lowercase__ = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
lowercase__ = replace_key_with_offset(lowerCamelCase_ , lowerCamelCase_ , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
lowercase__ = replace_key_with_offset(lowerCamelCase_ , lowerCamelCase_ , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
lowercase__ = replace_key_with_offset(lowerCamelCase_ , lowerCamelCase_ , '''norm1''' , '''before_norm''' )
if "norm2" in key:
lowercase__ = replace_key_with_offset(lowerCamelCase_ , lowerCamelCase_ , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
lowercase__ = replace_key_with_offset(lowerCamelCase_ , lowerCamelCase_ , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
lowercase__ = replace_key_with_offset(lowerCamelCase_ , lowerCamelCase_ , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
lowercase__ = key.replace('''head''' , '''classifier''' )
lowercase__ = value
return new_state_dict
def a ( ):
'''simple docstring'''
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return image
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = PoolFormerConfig()
# set attributes based on model_name
lowercase__ = '''huggingface/label-files'''
lowercase__ = model_name[-3:]
lowercase__ = 1000
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = (1, 1000)
# set config attributes
lowercase__ = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
if size == "s12":
lowercase__ = [2, 2, 6, 2]
lowercase__ = [64, 128, 320, 512]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s24":
lowercase__ = [4, 4, 12, 4]
lowercase__ = [64, 128, 320, 512]
lowercase__ = 4.0
lowercase__ = 0.9
elif size == "s36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [64, 128, 320, 512]
lowercase__ = 4.0
lowercase__ = 1e-6
lowercase__ = 0.9
elif size == "m36":
lowercase__ = [6, 6, 18, 6]
lowercase__ = [96, 192, 384, 768]
lowercase__ = 4.0
lowercase__ = 1e-6
lowercase__ = 0.95
elif size == "m48":
lowercase__ = [8, 8, 24, 8]
lowercase__ = [96, 192, 384, 768]
lowercase__ = 4.0
lowercase__ = 1e-6
lowercase__ = 0.95
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=lowerCamelCase_ )
# Prepare image
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase_ , return_tensors='''pt''' ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
lowercase__ = torch.load(lowerCamelCase_ , map_location=torch.device('''cpu''' ) )
# rename keys
lowercase__ = rename_keys(lowerCamelCase_ )
# create HuggingFace model and load state dict
lowercase__ = PoolFormerForImageClassification(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
# Define image processor
lowercase__ = PoolFormerImageProcessor(crop_pct=lowerCamelCase_ )
lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
lowercase__ = model(lowerCamelCase_ )
lowercase__ = outputs.logits
# define expected logit slices for different models
if size == "s12":
lowercase__ = torch.tensor([-0.30_45, -0.67_58, -0.48_69] )
elif size == "s24":
lowercase__ = torch.tensor([0.44_02, -0.13_74, -0.80_45] )
elif size == "s36":
lowercase__ = torch.tensor([-0.60_80, -0.51_33, -0.58_98] )
elif size == "m36":
lowercase__ = torch.tensor([0.39_52, 0.22_63, -1.26_68] )
elif size == "m48":
lowercase__ = torch.tensor([0.11_67, -0.06_56, -0.34_23] )
else:
raise ValueError(F"""Size {size} not supported""" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , lowerCamelCase_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
A__ : Union[str, Any] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
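# Worked example (an illustrative addition) of the block-renumbering rename
# performed above: with a patch-embedding offset of 1, checkpoint key
# "network.3.0.mlp.fc1.weight" maps to HF block 2 under the new sub-module
# name.
_key = "network.3.0.mlp.fc1.weight"
_renamed = _key.replace("3.0.mlp.fc1", "block.2.0.output.conv1")
assert _renamed == "network.block.2.0.output.conv1.weight"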
| 703 |
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : str = "", lowerCamelCase : bool = False ):
'''simple docstring'''
# Mapping from the first character of the prefix of the node
lowercase__ = {}
# A node will be a leaf if the tree contains its word
lowercase__ = is_leaf
lowercase__ = prefix
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = 0
for q, w in zip(self.prefix, lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase__ ( self : Optional[int], lowerCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
lowercase__ = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowercase__ = RadixNode(prefix=lowerCamelCase, is_leaf=lowerCamelCase )
else:
lowercase__ = self.nodes[word[0]]
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCamelCase )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowercase__ = remaining_prefix
lowercase__ = self.nodes[matching_string[0]]
lowercase__ = RadixNode(lowerCamelCase, lowerCamelCase )
lowercase__ = aux_node
if remaining_word == "":
lowercase__ = True
else:
self.nodes[matching_string[0]].insert(lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCamelCase )
def lowercase__ ( self : Any, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.nodes.get(word[0], lowerCamelCase )
if not incoming_node:
return False
else:
lowercase__ , lowercase__ , lowercase__ = incoming_node.match(
lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowercase__ = list(self.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
self.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowercase__ = False
# If there is 1 edge, we merge it with its child
else:
lowercase__ = list(incoming_node.nodes.values() )[0]
lowercase__ = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowercase__ = merging_node.nodes
return True
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def a ( ):
'''simple docstring'''
lowercase__ = '''banana bananas bandana band apple all beast'''.split()
lowercase__ = RadixNode()
root.insert_many(lowerCamelCase_ )
assert all(root.find(lowerCamelCase_ ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def a ( ):
'''simple docstring'''
assert test_trie()
def a ( ):
'''simple docstring'''
lowercase__ = RadixNode()
lowercase__ = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(lowerCamelCase_ )
print('''Words:''' , lowerCamelCase_ )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
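# Minimal standalone sketch (an addition) of the prefix-split step at the
# heart of the tree's match logic: split a stored prefix against an incoming
# word into (common part, leftover prefix, leftover word).
def _split_prefix(prefix: str, word: str) -> tuple[str, str, str]:
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]

assert _split_prefix("banana", "bandana") == ("ban", "ana", "dana")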
| 671 | 0 |
from collections.abc import Callable
import numpy as np
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = int(np.ceil((x_end - xa) / step_size ) )
lowercase__ = np.zeros((n + 1,) )
lowercase__ = ya
lowercase__ = xa
for k in range(_lowercase ):
lowercase__ = y[k] + step_size * ode_func(_lowercase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
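# Minimal standalone sketch (an addition) of the explicit Euler update used
# above, y_{k+1} = y_k + h * f(x_k, y_k): integrating y' = y from y(0) = 1
# over [0, 1] approaches e = 2.71828... as the step count grows.
def _euler_demo(steps: int = 1000) -> float:
    h = 1.0 / steps
    y = 1.0
    for _ in range(steps):
        y += h * y  # f(x, y) = y
    return y

assert abs(_euler_demo() - 2.7169) < 1e-3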
| 704 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = ViTImageProcessor if is_vision_available() else None
@property
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = (3, 32, 128)
lowercase__ = tempfile.mkdtemp()
# fmt: off
lowercase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
lowercase__ = dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase ) + '''\n''' )
lowercase__ = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
lowercase__ = os.path.join(self.tmpdirname, lowerCamelCase )
with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
json.dump(lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : int, **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase )
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a random PIL image on which the processor tests can run."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50_257)
        wp_input = torch.randn(1, 27, 30_522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
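
# A minimal usage sketch of the processor outside this test harness. The checkpoint id
# below is an illustrative assumption, not something these tests depend on:
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values
#   # an MGP-STR model yields (char, bpe, wp) logits; batch_decode fuses the three heads:
#   decoded = processor.batch_decode([char_logits, bpe_logits, wp_logits])
#   print(decoded["generated_text"])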
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
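
# Sketch of what the `_LazyModule` indirection buys: importing the package resolves
# quickly, and the torch-backed submodule is only materialized on first attribute access:
#
#   from transformers.models import gpt_bigcode   # cheap: nothing heavy is imported yet
#   config = gpt_bigcode.GPTBigCodeConfig()       # first access triggers the real import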
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Computes (base ** exponent) % modulo_value by recursive binary exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: last `digits` digits of the hyperexponentiation (tetration) of base by height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(F"{solution() = }")
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled" is included.
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
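
# Minimal sketch of how these arguments are used inside a SageMaker training job
# (the output path is the conventional SageMaker model directory, shown here as an example):
#
#   args = SageMakerTrainingArguments(output_dir="/opt/ml/model", per_device_train_batch_size=8)
#   args.device       # resolved by `_setup_devices`: smp, smddp, classic DDP, single GPU, or CPU
#   args.world_size   # smp.dp_size() under model parallelism, otherwise the usual value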
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    """Pipeline that transcribes speech with Whisper and feeds the text to Stable Diffusion."""

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
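
# Hedged usage sketch: in diffusers, community pipelines like this one are usually loaded
# through `custom_pipeline`; the checkpoint ids below are illustrative assumptions only:
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",               # example Stable Diffusion base
#       custom_pipeline="speech_to_image_diffusion",   # assumed community pipeline name
#       speech_model=whisper_model,                    # e.g. from "openai/whisper-small"
#       speech_processor=whisper_processor,
#   )
#   image = pipe(audio, sampling_rate=16_000).images[0]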
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Logs the metrics for a given split and saves them as `{split}_results.json` in `output_dir`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
        logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
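
# Illustrative invocation (file name, data path, and hyper-parameters are examples only);
# every flag maps one-to-one onto the dataclass fields defined above via HfArgumentParser:
#
#   python finetune_trainer.py \
#       --model_name_or_path facebook/bart-large \
#       --data_dir ./cnn_dm --task summarization \
#       --output_dir ./output --do_train --do_eval --predict_with_generate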
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge; weights are restricted to 0 or 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Gets all the edges leaving the given vertex."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: 0-weight edges go to the front of the deque, 1-weight edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
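
    # Worked example: with 0-1 BFS, 0-weight edges are expanded first (appendleft), so
    # the path 0 -(0)-> 1 -(1)-> 2 -(0)-> 3 is found with total cost 1.
    graph = AdjacencyList(4)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 1)
    graph.add_edge(0, 2, 1)
    graph.add_edge(2, 3, 0)
    assert graph.get_shortest_path(0, 3) == 1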
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts line up index-for-index, so copy weights positionally
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
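
# Illustrative invocation: convert a single LeViT variant and keep the result local, or
# omit --model_name to convert every entry in `names_to_config` in one run:
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S --no-push_to_hub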
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
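
# Worked example of the two DP arrays: for "1,-2,3,4" the best-sum-ending-here values
# (sum_value) are [1, -1, 3, 7] and the running maxima (rear) are [1, 1, 3, 7], so the
# maximum sub-array sum is 3 + 4 = 7:
#
#   assert SubArray("1,-2,3,4").solve_sub_array() == 7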
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Maps each choice string back to its typed value, so argparse can return typed arguments."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """ArgumentParser that infers its arguments from one or more dataclass types."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
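
# Minimal sketch of driving the parser (the dataclass and its fields are examples only):
#
#   @dataclass
#   class RunArgs:
#       model_name: str = HfArg(default="gpt2", help="Model to load", aliases=["-m"])
#       fp16: bool = False
#
#   parser = HfArgumentParser(RunArgs)
#   (run_args,) = parser.parse_args_into_dataclasses(["-m", "gpt2-medium", "--fp16"])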
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: least row length n for which the number of fills exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(F"{solution() = }")
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def a ( ):
'''simple docstring'''
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowercase__ = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase__ )
datasets.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.set_verbosity(lowerCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowercase__ = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowercase__ = data_args.train_file.split('''.''' )[-1]
lowercase__ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowercase__ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(F"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowercase__ = load_dataset('''csv''' , data_files=lowerCamelCase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowercase__ = load_dataset('''json''' , data_files=lowerCamelCase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowercase__ = raw_datasets["train"].features["label"].names
lowercase__ = len(lowerCamelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
tokenizer = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCamelCase__ , )
lowercase__ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowercase__ = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowercase__ = {"Refused": 0, "Entailed": 1}
lowercase__ = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_tabfact_function(examples):
    # Tokenize the texts
    def _convert_table_text_to_pandas(_table_text):
        _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
        _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
        return _table_pd

    questions = examples["statement"]
    tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
    result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
    result["label"] = examples["label"]
    return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
raw_datasets = raw_datasets.map(
    preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset"
)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowercase__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
lowercase__ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowercase__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
lowercase__ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowercase__ = raw_datasets["test"]
if data_args.max_predict_samples is not None:
lowercase__ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCamelCase__ ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase_ ):
lowercase__ = p.predictions[0] if isinstance(p.predictions , lowerCamelCase__ ) else p.predictions
lowercase__ = np.argmax(lowerCamelCase__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowercase__ = default_data_collator
elif training_args.fpaa:
lowercase__ = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 )
else:
lowercase__ = None
# Initialize our Trainer
lowercase__ = Trainer(
model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
# Training
if training_args.do_train:
lowercase__ = None
if training_args.resume_from_checkpoint is not None:
lowercase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ = last_checkpoint
lowercase__ = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
lowercase__ = train_result.metrics
lowercase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
)
lowercase__ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , lowerCamelCase__ )
trainer.save_metrics('''train''' , lowerCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ = trainer.evaluate(eval_dataset=lowerCamelCase__ )
lowercase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ )
lowercase__ = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
trainer.log_metrics('''eval''' , lowerCamelCase__ )
trainer.save_metrics('''eval''' , lowerCamelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
predict_dataset = predict_dataset.remove_columns("label")
lowercase__ = trainer.predict(lowerCamelCase__ , metric_key_prefix='''predict''' ).predictions
lowercase__ = np.argmax(lowerCamelCase__ , axis=1 )
lowercase__ = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCamelCase__ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCamelCase__ ):
item = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
lowercase__ = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase__ )
else:
trainer.create_model_card(**lowerCamelCase__ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
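# A minimal standalone sketch of the "#"-delimited table-text conversion that
# preprocess_tabfact_function performs above. The sample string below is
# hypothetical; real TabFact `table_text` fields use the same delimiters
# (rows split on "\n", cells on "#", first row = header).
import pandas as pd


def _table_text_to_pandas_demo(table_text: str) -> pd.DataFrame:
    table_content = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(table_content[1:], columns=table_content[0])


# e.g. _table_text_to_pandas_demo("player#points\nalice#10\nbob#7")
# ->    player points
#    0   alice     10
#    1     bob      7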
| 710 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _UpperCAmelCase ( SequenceFeatureExtractor ):
"""simple docstring"""
lowercase__ = ["""input_features""", """is_longer"""]
def __init__( self : Optional[int], lowerCamelCase : int=64, lowerCamelCase : Union[str, Any]=48_000, lowerCamelCase : str=480, lowerCamelCase : Tuple=10, lowerCamelCase : List[Any]=1_024, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[Any]=False, lowerCamelCase : float = 0, lowerCamelCase : float = 14_000, lowerCamelCase : int = None, lowerCamelCase : str = "fusion", lowerCamelCase : str = "repeatpad", **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
feature_size=lowerCamelCase, sampling_rate=lowerCamelCase, padding_value=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
lowercase__ = top_db
lowercase__ = truncation
lowercase__ = padding
lowercase__ = fft_window_size
lowercase__ = (fft_window_size >> 1) + 1
lowercase__ = hop_length
lowercase__ = max_length_s
lowercase__ = max_length_s * sampling_rate
lowercase__ = sampling_rate
lowercase__ = frequency_min
lowercase__ = frequency_max
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm=lowerCamelCase, mel_scale='''htk''', )
lowercase__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=lowerCamelCase, min_frequency=lowerCamelCase, max_frequency=lowerCamelCase, sampling_rate=lowerCamelCase, norm='''slaney''', mel_scale='''slaney''', )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
lowercase__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[int], lowerCamelCase : np.array, lowerCamelCase : Optional[np.array] = None ):
'''simple docstring'''
lowercase__ = spectrogram(
lowerCamelCase, window_function(self.fft_window_size, '''hann''' ), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=lowerCamelCase, log_mel='''dB''', )
return log_mel_spectrogram.T
def lowercase__ ( self : int, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = np.array_split(list(range(0, total_frames - chunk_frames + 1 ) ), 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowercase__ = [0]
# randomly choose index for each part
lowercase__ = np.random.choice(ranges[0] )
lowercase__ = np.random.choice(ranges[1] )
lowercase__ = np.random.choice(ranges[2] )
lowercase__ = mel[idx_front : idx_front + chunk_frames, :]
lowercase__ = mel[idx_middle : idx_middle + chunk_frames, :]
lowercase__ = mel[idx_back : idx_back + chunk_frames, :]
lowercase__ = torch.tensor(mel[None, None, :] )
lowercase__ = torch.nn.functional.interpolate(
lowerCamelCase, size=[chunk_frames, 64], mode='''bilinear''', align_corners=lowerCamelCase )
lowercase__ = mel_shrink[0][0].numpy()
lowercase__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0 )
return mel_fusion
def lowercase__ ( self : List[str], lowerCamelCase : np.array, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowercase__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowercase__ = len(lowerCamelCase ) - max_length
lowercase__ = np.random.randint(0, overflow + 1 )
lowercase__ = waveform[idx : idx + max_length]
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowercase__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowercase__ = np.stack([mel, mel, mel, mel], axis=0 )
lowercase__ = False
else:
lowercase__ = self._random_mel_fusion(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowercase__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowercase__ = int(max_length / len(lowerCamelCase ) )
lowercase__ = np.stack(np.tile(lowerCamelCase, lowerCamelCase ) )
lowercase__ = np.pad(lowerCamelCase, (0, max_length - waveform.shape[0]), mode='''constant''', constant_values=0 )
if truncation == "fusion":
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters )
lowercase__ = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0 )
else:
lowercase__ = self._np_extract_fbank_features(lowerCamelCase, self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any], lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], lowerCamelCase : str = None, lowerCamelCase : Optional[str] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, **lowerCamelCase : List[str], ):
'''simple docstring'''
lowercase__ = truncation if truncation is not None else self.truncation
lowercase__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__ = isinstance(lowerCamelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(lowerCamelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase, np.ndarray ):
lowercase__ = np.asarray(lowerCamelCase, dtype=np.floataa )
elif isinstance(lowerCamelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray(lowerCamelCase )]
# convert to mel spectrogram, truncate and pad if needed.
lowercase__ = [
self._get_input_mel(lowerCamelCase, max_length if max_length else self.nb_max_samples, lowerCamelCase, lowerCamelCase )
for waveform in raw_speech
]
lowercase__ = []
lowercase__ = []
for mel, longer in padded_inputs:
input_mel.append(lowerCamelCase )
is_longer.append(lowerCamelCase )
if truncation == "fusion" and sum(lowerCamelCase ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowercase__ = np.random.randint(0, len(lowerCamelCase ) )
lowercase__ = True
if isinstance(input_mel[0], lowerCamelCase ):
lowercase__ = [np.asarray(lowerCamelCase, dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowercase__ = [[longer] for longer in is_longer]
lowercase__ = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowercase__ = BatchFeature(lowerCamelCase )
if return_tensors is not None:
lowercase__ = input_features.convert_to_tensors(lowerCamelCase )
return input_features
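# A numpy-only sketch of the "repeat" / "repeatpad" padding strategies handled
# in the truncation/padding method above, assuming a mono waveform shorter than
# `max_length`. Names here are illustrative, not part of the class API.
import numpy as np


def _pad_waveform_demo(waveform: np.ndarray, max_length: int, padding: str) -> np.ndarray:
    n_repeat = int(max_length / len(waveform))
    if padding == "repeat":
        # tile one extra copy, then cut down to exactly max_length
        return np.tile(waveform, n_repeat + 1)[:max_length]
    # "repeatpad": tile as many whole copies as fit, then zero-pad the remainder
    repeated = np.tile(waveform, n_repeat)
    return np.pad(repeated, (0, max_length - repeated.shape[0]), mode="constant", constant_values=0)


# e.g. _pad_waveform_demo(np.array([1.0, 2.0, 3.0]), 8, "repeatpad")
# -> array([1., 2., 3., 1., 2., 3., 0., 0.])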
| 671 | 0 |
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore substring search using the bad character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatching character for the
        alignment starting at `current_pos`, or -1 if the window matches."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
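# A small cross-check (hypothetical, not part of the original algorithm) of the
# heuristic above against a naive scan; both should report the same positions.
def _naive_positions(text: str, pattern: str) -> list[int]:
    return [i for i in range(len(text) - len(pattern) + 1) if text.startswith(pattern, i)]


assert positions == _naive_positions(text, pattern)  # "ABAABA" / "AB" -> [0, 3]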
| 711 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree, right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree, root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> list[int]:
    """Breadth-first traversal using a queue."""
    output: list[int] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[int]:
    """Collect the nodes of a single level, scanning left to right."""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[int]:
    """Collect the nodes of a single level, scanning right to left."""
    output: list[int] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> list[list[int]]:
    """Zigzag (spiral) level-order traversal, alternating scan direction per level."""
    if root is None:
        return []
    output: list[list[int]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
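# A hypothetical doctest-style check of the traversals on the sample tree from
# make_tree(); the expected values follow directly from the 5-node layout.
def _check_sample_tree() -> None:
    root = make_tree()
    assert inorder(root) == [4, 2, 5, 1, 3]
    assert preorder(root) == [1, 2, 4, 5, 3]
    assert zigzag(root) == [[1], [3, 2], [4, 5]]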
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A__ : int = logging.get_logger(__name__)
A__ : Dict = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
for attribute in key.split('''.''' ):
lowercase__ = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
lowercase__ = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(__lowerCAmelCase )[0].split('''.''' )[-2]
lowercase__ = mapped_key.replace('''*''' , __lowerCAmelCase )
if "weight_g" in name:
lowercase__ = """weight_g"""
elif "weight_v" in name:
lowercase__ = """weight_v"""
elif "weight" in name:
lowercase__ = """weight"""
elif "bias" in name:
lowercase__ = """bias"""
else:
lowercase__ = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = full_name.split('''conv_layers.''' )[-1]
lowercase__ = name.split('''.''' )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowercase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowercase__ = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowercase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowercase__ = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCAmelCase )
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = SEWConfig()
if is_finetuned:
lowercase__ = model.wav_encoder.wav_model.cfg
else:
lowercase__ = model.cfg
lowercase__ = fs_config.conv_bias
lowercase__ = eval(fs_config.conv_feature_layers )
lowercase__ = [x[0] for x in conv_layers]
lowercase__ = [x[1] for x in conv_layers]
lowercase__ = [x[2] for x in conv_layers]
lowercase__ = """gelu"""
lowercase__ = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
lowercase__ = 0.0
lowercase__ = fs_config.activation_fn.name
lowercase__ = fs_config.encoder_embed_dim
lowercase__ = 0.02
lowercase__ = fs_config.encoder_ffn_embed_dim
lowercase__ = 1e-5
lowercase__ = fs_config.encoder_layerdrop
lowercase__ = fs_config.encoder_attention_heads
lowercase__ = fs_config.conv_pos_groups
lowercase__ = fs_config.conv_pos
lowercase__ = len(__lowerCAmelCase )
lowercase__ = fs_config.encoder_layers
lowercase__ = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
lowercase__ = model.cfg
lowercase__ = fs_config.final_dropout
lowercase__ = fs_config.layerdrop
lowercase__ = fs_config.activation_dropout
lowercase__ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
lowercase__ = fs_config.attention_dropout
lowercase__ = fs_config.dropout_input
lowercase__ = fs_config.dropout
lowercase__ = fs_config.mask_channel_length
lowercase__ = fs_config.mask_channel_prob
lowercase__ = fs_config.mask_length
lowercase__ = fs_config.mask_prob
lowercase__ = """Wav2Vec2FeatureExtractor"""
lowercase__ = """Wav2Vec2CTCTokenizer"""
return config
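# For reference, a hypothetical fairseq `conv_feature_layers` string and the
# (dim, kernel, stride) triples that the eval() in the config conversion above
# would produce from it:
_demo_spec = "[(512, 10, 5)] + [(512, 3, 2)] * 2"
_demo_layers = eval(_demo_spec)
assert [x[0] for x in _demo_layers] == [512, 512, 512]  # conv_dim
assert [x[1] for x in _demo_layers] == [10, 3, 3]  # conv_kernel
assert [x[2] for x in _demo_layers] == [5, 2, 2]  # conv_stride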
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=True ):
'''simple docstring'''
if is_finetuned:
lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
lowercase__ = SEWConfig.from_pretrained(__lowerCAmelCase )
else:
lowercase__ = convert_config(model[0] , __lowerCAmelCase )
lowercase__ = model[0].eval()
lowercase__ = True if config.feat_extract_norm == """layer""" else False
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
lowercase__ = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ = target_dict.pad_index
lowercase__ = target_dict.bos_index
lowercase__ = target_dict.pad_index
lowercase__ = target_dict.bos_index
lowercase__ = target_dict.eos_index
lowercase__ = len(target_dict.symbols )
lowercase__ = os.path.join(__lowerCAmelCase , '''vocab.json''' )
if not os.path.isdir(__lowerCAmelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
lowercase__ = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__lowerCAmelCase , )
lowercase__ = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
lowercase__ = SEWForCTC(__lowerCAmelCase )
else:
lowercase__ = SEWModel(__lowerCAmelCase )
feature_extractor.save_pretrained(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
A__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
A__ : Dict = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
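# A tiny, self-contained sketch of the getattr chain that the recursive weight
# setter above walks: resolving a dotted key such as "attention.k_proj" down a
# module tree. The classes are stand-ins, not real fairseq/transformers modules.
class _DemoLeaf:
    shape = (2, 2)


class _DemoAttention:
    k_proj = _DemoLeaf()


class _DemoRoot:
    attention = _DemoAttention()


def _resolve(root, dotted_key):
    pointer = root
    for attribute in dotted_key.split("."):
        pointer = getattr(pointer, attribute)
    return pointer


assert _resolve(_DemoRoot(), "attention.k_proj").shape == (2, 2)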
| 712 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( BertTokenizationTest ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
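# A schematic sketch (plain lists, no checkpoint download) of the special-token
# layout the test above asserts; 101/102 are the conventional uncased-BERT
# [CLS]/[SEP] ids, used here purely for illustration.
def _with_special_tokens(cls_id, sep_id, ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


assert _with_special_tokens(101, 102, [7, 8]) == [101, 7, 8, 102]
assert _with_special_tokens(101, 102, [7, 8], [9]) == [101, 7, 8, 102, 9, 102]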
| 671 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
A__ : List[str] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=__UpperCAmelCase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__UpperCAmelCase , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__UpperCAmelCase , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=__UpperCAmelCase , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=__UpperCAmelCase , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=__UpperCAmelCase , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=__UpperCAmelCase , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=__UpperCAmelCase , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=__UpperCAmelCase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=__UpperCAmelCase , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=__UpperCAmelCase , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=__UpperCAmelCase , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=__UpperCAmelCase , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=__UpperCAmelCase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=__UpperCAmelCase , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=__UpperCAmelCase , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=__UpperCAmelCase , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__UpperCAmelCase , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=__UpperCAmelCase , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__UpperCAmelCase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5e-4 , type=__UpperCAmelCase , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=__UpperCAmelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__UpperCAmelCase , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=__UpperCAmelCase , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__UpperCAmelCase , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=__UpperCAmelCase , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=__UpperCAmelCase , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=__UpperCAmelCase , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=__UpperCAmelCase , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=__UpperCAmelCase , default=4000 , help='''Checkpoint interval.''' )
lowercase__ = parser.parse_args()
sanity_checks(__UpperCAmelCase )
# ARGS #
init_gpu_params(__UpperCAmelCase )
set_seed(__UpperCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(F"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(__UpperCAmelCase ) , __UpperCAmelCase , indent=4 )
git_log(args.dump_path )
lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[args.student_type]
lowercase__ , lowercase__ , lowercase__ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
lowercase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
lowercase__ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
lowercase__ = tokenizer.all_special_tokens.index(__UpperCAmelCase )
lowercase__ = tokenizer.all_special_ids[idx]
logger.info(F"""Special tokens {special_tok_ids}""" )
lowercase__ = special_tok_ids
lowercase__ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
lowercase__ = pickle.load(__UpperCAmelCase )
if args.mlm:
logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
lowercase__ = pickle.load(__UpperCAmelCase )
lowercase__ = np.maximum(__UpperCAmelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowercase__ = 0.0 # do not predict special tokens
lowercase__ = torch.from_numpy(__UpperCAmelCase )
else:
lowercase__ = None
lowercase__ = LmSeqsDataset(params=__UpperCAmelCase , data=__UpperCAmelCase )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F"""Loading student config from {args.student_config}""" )
lowercase__ = student_config_class.from_pretrained(args.student_config )
lowercase__ = True
if args.student_pretrained_weights is not None:
logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
lowercase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__UpperCAmelCase )
else:
lowercase__ = student_model_class(__UpperCAmelCase )
if args.n_gpu > 0:
student.to(F"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
lowercase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__UpperCAmelCase )
if args.n_gpu > 0:
teacher.to(F"""cuda:{args.local_rank}""" )
logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__UpperCAmelCase , __UpperCAmelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__UpperCAmelCase , __UpperCAmelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowercase__ = Distiller(
params=__UpperCAmelCase , dataset=__UpperCAmelCase , token_probs=__UpperCAmelCase , student=__UpperCAmelCase , teacher=__UpperCAmelCase )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
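# A numpy-only sketch of the MLM masking-weight smoothing computed above
# (token_counts ** -mlm_smoothing, with special-token ids zeroed out). The
# counts below are made up for illustration.
import numpy as np


def _smooth_token_weights(counts, smoothing=0.7, special_ids=()):
    weights = np.maximum(np.asarray(counts, dtype=np.float64), 1) ** -smoothing
    for idx in special_ids:
        weights[idx] = 0.0  # never select special tokens for masking
    return weights


# rare tokens receive a larger sampling weight than frequent ones:
_w = _smooth_token_weights([1000, 10, 1], special_ids=(0,))
assert _w[0] == 0.0 and _w[2] > _w[1]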
| 713 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Total resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ...)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Total resistance of resistors in series: R1 + R2 + ..."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
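# Hypothetical usage of the two helpers above (two 4-ohm resistors):
#   parallel: 1 / (1/4 + 1/4) = 2 ohms; series: 4 + 4 = 8 ohms
assert resistor_parallel([4.0, 4.0]) == 2.0
assert resistor_series([4.0, 4.0]) == 8.0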
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
A__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
A__ : List[str] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 714 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
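# A quick, self-contained check of the regex renaming above on one
# representative (hypothetical) checkpoint key:
assert rename_key("visual_encoder.blocks.0.norm1.weight") == (
    "vision_model.encoder.layers.0.layer_norm1.weight"
)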
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path=None, config_path=None):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 671 | 0 |
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack over the first i items with capacity j."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = f"All weights must be integers but got weight of type {type(wt[i])} at index {i}"
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
A__ : Any = [3, 2, 4, 4]
A__ : str = [4, 3, 2, 3]
A__ : Dict = 4
A__ : str = 6
A__ : List[str] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
A__ : Union[str, Any] = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
A__ : Optional[Any] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
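# An alternative, hypothetical top-down formulation using functools memoization,
# shown for comparison with the table-based versions above; it reproduces the
# optimal value of 8 for the example instance.
from functools import lru_cache


def knapsack_memo(w, wt, vals, n):
    @lru_cache(maxsize=None)
    def best(i, cap):
        if i == 0 or cap == 0:
            return 0
        if wt[i - 1] > cap:
            return best(i - 1, cap)
        return max(best(i - 1, cap), vals[i - 1] + best(i - 1, cap - wt[i - 1]))

    return best(n, w)


assert knapsack_memo(6, (4, 3, 2, 3), (3, 2, 4, 4), 4) == 8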
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
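        # intentionally a no-op placeholder test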
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _UpperCAmelCase ( a__ ):
"""simple docstring"""
lowercase__ = ["""image_processor""", """tokenizer"""]
lowercase__ = """BlipImageProcessor"""
lowercase__ = """AutoTokenizer"""
def __init__( self : str, lowerCamelCase : Optional[int], lowerCamelCase : int, lowerCamelCase : Tuple ):
'''simple docstring'''
super().__init__(_A, _A )
# add QFormer tokenizer
lowercase__ = qformer_tokenizer
def __call__( self : Dict, lowerCamelCase : str = None, lowerCamelCase : Union[str, Any] = None, lowerCamelCase : List[Any] = True, lowerCamelCase : List[str] = False, lowerCamelCase : Dict = None, lowerCamelCase : List[Any] = None, lowerCamelCase : List[str] = 0, lowerCamelCase : Any = None, lowerCamelCase : Union[str, Any] = None, lowerCamelCase : str = False, lowerCamelCase : Dict = False, lowerCamelCase : Tuple = False, lowerCamelCase : Dict = False, lowerCamelCase : List[str] = False, lowerCamelCase : int = True, lowerCamelCase : Any = None, **lowerCamelCase : Dict, ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
lowercase__ = BatchFeature()
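        # Text is tokenized twice: once with the language-model tokenizer and
        # once with the Q-Former tokenizer; the Q-Former ids/mask are stored
        # under dedicated keys below so each encoder receives its own inputs.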
if text is not None:
lowercase__ = self.tokenizer(
text=_A, add_special_tokens=_A, padding=_A, truncation=_A, max_length=_A, stride=_A, pad_to_multiple_of=_A, return_attention_mask=_A, return_overflowing_tokens=_A, return_special_tokens_mask=_A, return_offsets_mapping=_A, return_token_type_ids=_A, return_length=_A, verbose=_A, return_tensors=_A, **_A, )
encoding.update(_A )
lowercase__ = self.qformer_tokenizer(
text=_A, add_special_tokens=_A, padding=_A, truncation=_A, max_length=_A, stride=_A, pad_to_multiple_of=_A, return_attention_mask=_A, return_overflowing_tokens=_A, return_special_tokens_mask=_A, return_offsets_mapping=_A, return_token_type_ids=_A, return_length=_A, verbose=_A, return_tensors=_A, **_A, )
lowercase__ = qformer_text_encoding.pop('''input_ids''' )
lowercase__ = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
lowercase__ = self.image_processor(_A, return_tensors=_A )
encoding.update(_A )
return encoding
def lowercase__ ( self : Tuple, *lowerCamelCase : str, **lowerCamelCase : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A, **_A )
def lowercase__ ( self : int, *lowerCamelCase : Optional[int], **lowerCamelCase : List[str] ):
'''simple docstring'''
return self.tokenizer.decode(*_A, **_A )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowercase__ ( self : Any, lowerCamelCase : List[str], **lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if os.path.isfile(_A ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(_A, exist_ok=_A )
lowercase__ = os.path.join(_A, '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(_A )
return super().save_pretrained(_A, **_A )
@classmethod
def lowercase__ ( cls : Union[str, Any], lowerCamelCase : Any, **lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(_A, subfolder='''qformer_tokenizer''' )
lowercase__ = cls._get_arguments_from_pretrained(_A, **_A )
args.append(_A )
return cls(*_A )
| 716 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
        return lowerCamelCase_
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
    # This inner function sorts the imports inside [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
        lowercase__ = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : lowerCamelCase_[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
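    # Sorts the `_import_structure` entries of one __init__.py in place; with
    # check_only=True it only reports whether the file would change.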
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                lowercase__ = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        lowercase__ = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
        lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : lowerCamelCase_[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
        for i in range(len(internal_blocks ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 0 |
import numpy as np
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict ):
'''simple docstring'''
lowercase__ = (0, 0)
lowercase__ = None
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
def __eq__( self : Tuple, lowerCamelCase : int ):
'''simple docstring'''
return self.position == cell.position
def lowercase__ ( self : Dict ):
'''simple docstring'''
print(self.position )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : List[Any], lowerCamelCase : List[Any]=(5, 5) ):
'''simple docstring'''
lowercase__ = np.zeros(__lowerCamelCase )
lowercase__ = world_size[0]
lowercase__ = world_size[1]
def lowercase__ ( self : Dict ):
'''simple docstring'''
print(self.w )
def lowercase__ ( self : str, lowerCamelCase : Tuple ):
'''simple docstring'''
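        # Enumerate the 8 surrounding coordinates, keep those inside the grid,
        # and link each neighbour back to `cell` via its parent pointer.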
lowercase__ = [
(-1, -1),
(-1, 0),
(-1, 1),
(0, -1),
(0, 1),
(1, -1),
(1, 0),
(1, 1),
]
lowercase__ = cell.position[0]
lowercase__ = cell.position[1]
lowercase__ = []
for n in neughbour_cord:
lowercase__ = current_x + n[0]
lowercase__ = current_y + n[1]
if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
lowercase__ = Cell()
lowercase__ = (x, y)
lowercase__ = cell
neighbours.append(__lowerCamelCase )
return neighbours
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
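    # A* search: _open is the frontier ordered by f = g + h, _closed holds
    # expanded cells, and the final path is rebuilt through parent links.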
lowercase__ = []
lowercase__ = []
_open.append(_lowerCamelCase )
while _open:
lowercase__ = np.argmin([n.f for n in _open] )
lowercase__ = _open[min_f]
_closed.append(_open.pop(_lowerCamelCase ) )
if current == goal:
break
for n in world.get_neigbours(_lowerCamelCase ):
for c in _closed:
if c == n:
continue
            lowercase__ = current.g + 1
            # heuristic: squared Euclidean distance from this neighbour to the goal
            lowercase__ = (goal.position[1] - n.position[1]) ** 2 + (goal.position[0] - n.position[0]) ** 2
            lowercase__ = n.h + n.g
for c in _open:
if c == n and c.f < n.f:
continue
_open.append(_lowerCamelCase )
lowercase__ = []
while current.parent is not None:
path.append(current.position )
lowercase__ = current.parent
path.append(current.position )
return path[::-1]
if __name__ == "__main__":
A__ : int = Gridworld()
# Start position and goal
A__ : Dict = Cell()
A__ : List[str] = (0, 0)
A__ : List[str] = Cell()
A__ : Dict = (4, 4)
print(F"path from {start.position} to {goal.position}")
A__ : Dict = astar(world, start, goal)
# Just for visual reasons.
for i in s:
A__ : Dict = 1
print(world.w)
| 717 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
lowercase__ = [] # this list will be returns.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowercase__ = [] # this list will be returns of the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
lowercase__ = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
    lowercase__ = len(prime_numbers )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit variable. for break up the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
lowercase__ = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
                lowercase__ = prime_fac_a.count(n )
                lowercase__ = prime_fac_a.count(n )
                for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
                    ans *= n
            else:
                lowercase__ = prime_fac_a.count(n )
                for _ in range(lowerCamelCase_ ):
                    ans *= n
            done.append(n )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
            lowercase__ = prime_fac_a.count(n )
            for _ in range(lowerCamelCase_ ):
                ans *= n
            done.append(n )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
lowercase__ = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
lowercase__ = 1 # this will be return
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
| 671 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
A__ : Optional[int] = {"vocab_file": "vocab.json"}
A__ : Dict = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
A__ : Optional[Any] = {"mgp-str": 27}
class _UpperCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int, lowerCamelCase : Optional[int], lowerCamelCase : Optional[int]="[GO]", lowerCamelCase : int="[GO]", lowerCamelCase : str="[s]", lowerCamelCase : Dict="[GO]", **lowerCamelCase : List[str] ):
'''simple docstring'''
super().__init__(
unk_token=lowerCamelCase_, bos_token=lowerCamelCase_, eos_token=lowerCamelCase_, pad_token=lowerCamelCase_, **lowerCamelCase_, )
with open(lowerCamelCase_, encoding='''utf-8''' ) as vocab_handle:
lowercase__ = json.load(lowerCamelCase_ )
lowercase__ = {v: k for k, v in self.vocab.items()}
@property
def lowercase__ ( self : int ):
'''simple docstring'''
return len(self.vocab )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return dict(self.vocab, **self.added_tokens_encoder )
def lowercase__ ( self : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = []
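        # character-level tokenization: each character becomes its own token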
for s in text:
            char_tokens.extend(s )
return char_tokens
def lowercase__ ( self : Dict, lowerCamelCase : Tuple ):
'''simple docstring'''
return self.vocab.get(lowerCamelCase_, self.vocab.get(self.unk_token ) )
def lowercase__ ( self : List[str], lowerCamelCase : Dict ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase_ )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
lowercase__ = os.path.join(
lowerCamelCase_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(lowerCamelCase_, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab, indent=2, sort_keys=lowerCamelCase_, ensure_ascii=lowerCamelCase_ ) + '''\n''' )
return (vocab_file,)
| 718 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowercase__ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase__ = tokenizer('''Hello there''', return_tensors='''np''' ).input_ids
lowercase__ = tokenizer('''Hi I am''', return_tensors='''np''' ).input_ids
lowercase__ = shift_tokens_right(__UpperCamelCase, model.config.pad_token_id, model.config.decoder_start_token_id )
lowercase__ = model(__UpperCamelCase, decoder_input_ids=__UpperCamelCase ).logits
lowercase__ = optax.softmax_cross_entropy(__UpperCamelCase, onehot(__UpperCamelCase, logits.shape[-1] ) ).mean()
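        # scale the mean token loss back to a summed, negated sequence log-likelihood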
lowercase__ = -(labels.shape[-1] * loss.item())
lowercase__ = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 719 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
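    # slide a window of 13 adjacent digits over the series and take the
    # largest digit product (Project Euler problem 8)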
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ : Union[str, Any] = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = ["""MobileNetV2FeatureExtractor"""]
A__ : Dict = ["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = [
"""MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileNetV2ForImageClassification""",
"""MobileNetV2ForSemanticSegmentation""",
"""MobileNetV2Model""",
"""MobileNetV2PreTrainedModel""",
"""load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
A__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 720 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
        # settable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
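        # perturb the clean samples with sigma(t)-scaled Gaussian noise: x_t = x_0 + sigma_t * z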
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 671 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[int] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A__ : Dict = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
A__ : Optional[Any] = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = GPTaTokenizer
def __init__( self : List[str], lowerCamelCase : List[str]=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Dict=None, lowerCamelCase : Tuple="<|endoftext|>", lowerCamelCase : int="<|endoftext|>", lowerCamelCase : List[str]="<|endoftext|>", lowerCamelCase : Optional[Any]=False, **lowerCamelCase : Union[str, Any], ):
'''simple docstring'''
super().__init__(
_lowercase, _lowercase, tokenizer_file=_lowercase, unk_token=_lowercase, bos_token=_lowercase, eos_token=_lowercase, add_prefix_space=_lowercase, **_lowercase, )
lowercase__ = kwargs.pop('''add_bos_token''', _lowercase )
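        # keep the fast backend's pre-tokenizer in sync with the add_prefix_space argument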
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''', _lowercase ) != add_prefix_space:
lowercase__ = getattr(_lowercase, pre_tok_state.pop('''type''' ) )
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**_lowercase )
lowercase__ = add_prefix_space
def lowercase__ ( self : Optional[Any], *lowerCamelCase : Tuple, **lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = kwargs.get('''is_split_into_words''', _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowercase, **_lowercase )
def lowercase__ ( self : Dict, *lowerCamelCase : List[Any], **lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = kwargs.get('''is_split_into_words''', _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowercase, **_lowercase )
def lowercase__ ( self : Optional[int], lowerCamelCase : Any, lowerCamelCase : Union[str, Any] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(_lowercase, name=_lowercase )
return tuple(_lowercase )
def lowercase__ ( self : Dict, lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_lowercase, add_special_tokens=_lowercase ) + [self.eos_token_id] )
if len(_lowercase ) > self.model_max_length:
lowercase__ = input_ids[-self.model_max_length :]
return input_ids
| 721 |
from collections import defaultdict
from math import gcd
def a ( lowerCamelCase_ = 150_0000 ):
'''simple docstring'''
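    # Euclid's formula: coprime (m, n) of opposite parity generate primitive
    # triples with perimeter 2 * m * (m + n); count perimeters hit exactly once.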
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , lowerCamelCase_ , 2 ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) > 1:
continue
lowercase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
import argparse
import copy
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = {}
with open(snake_case__ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
lowercase__ = []
_list.append([line.split()[1], line.split()[2]] )
lowercase__ = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
lowercase__ = []
_list.append([line.split()[0], line.split()[2]] )
lowercase__ = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
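    # Greedy nearest-neighbour construction: start from the first node in the
    # file and always visit the cheapest unvisited neighbour, closing the tour.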
with open(snake_case__ ) as f:
lowercase__ = f.read(1 )
lowercase__ = start_node
lowercase__ = []
lowercase__ = start_node
lowercase__ = 0
while visiting not in first_solution:
lowercase__ = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(snake_case__ ) and k[0] not in first_solution:
lowercase__ = k[1]
lowercase__ = k[0]
first_solution.append(snake_case__ )
lowercase__ = distance_of_first_solution + int(snake_case__ )
lowercase__ = best_node
first_solution.append(snake_case__ )
lowercase__ = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
lowercase__ = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
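    # 2-swap neighbourhood: exchange every pair of interior nodes; each
    # candidate solution carries its tour length as the appended last element.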
lowercase__ = []
for n in solution[1:-1]:
        lowercase__ = solution.index(n )
for kn in solution[1:-1]:
            lowercase__ = solution.index(kn )
if n == kn:
continue
lowercase__ = copy.deepcopy(snake_case__ )
lowercase__ = kn
lowercase__ = n
lowercase__ = 0
for k in _tmp[:-1]:
            lowercase__ = _tmp[_tmp.index(k ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
lowercase__ = distance + int(i[1] )
_tmp.append(snake_case__ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
lowercase__ = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda lowerCamelCase_ : lowerCamelCase_[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
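    # Tabu search: move to the best admissible neighbour each iteration, record
    # the swapped pair in a bounded tabu list, and keep the best tour ever seen.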
lowercase__ = 1
lowercase__ = first_solution
lowercase__ = []
lowercase__ = distance_of_first_solution
lowercase__ = solution
while count <= iters:
lowercase__ = find_neighborhood(snake_case__ , snake_case__ )
lowercase__ = 0
lowercase__ = neighborhood[index_of_best_solution]
lowercase__ = len(snake_case__ ) - 1
lowercase__ = False
while not found:
lowercase__ = 0
            while i < len(best_solution ):
if best_solution[i] != solution[i]:
lowercase__ = best_solution[i]
lowercase__ = solution[i]
break
lowercase__ = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
lowercase__ = True
lowercase__ = best_solution[:-1]
lowercase__ = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
lowercase__ = cost
lowercase__ = solution
else:
lowercase__ = index_of_best_solution + 1
lowercase__ = neighborhood[index_of_best_solution]
if len(snake_case__ ) >= size:
tabu_list.pop(0 )
lowercase__ = count + 1
return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
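# Usage sketch (the data-file name is illustrative):
#     python tabu_search.py -f tsp_edges.txt -i 100 -s 5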
| 700 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
    'bert-base-german-cased': 512,
    'bert-large-uncased-whole-word-masking': 512,
    'bert-large-cased-whole-word-masking': 512,
    'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
    'bert-large-cased-whole-word-masking-finetuned-squad': 512,
    'bert-base-cased-finetuned-mrpc': 512,
    'bert-base-german-dbmdz-cased': 512,
    'bert-base-german-dbmdz-uncased': 512,
    'TurkuNLP/bert-base-finnish-cased-v1': 512,
    'TurkuNLP/bert-base-finnish-uncased-v1': 512,
    'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if its saved state disagrees with the
        # arguments passed here (lowercasing, accent stripping, Chinese chars).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids are 0 for the first sequence (incl. special tokens) and 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
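# Minimal usage sketch (standard public checkpoint):
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     tokenizer("Hello world")["input_ids"]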
| 671 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
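# Example: euclidean_distance((0, 0), (3, 4)) == 5.0, and the no-np variant agrees.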
if __name__ == "__main__":
    def benchmark() -> None:
        """Time both implementations on the same small inputs."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
        'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
        'UniSpeechForCTC',
        'UniSpeechForPreTraining',
        'UniSpeechForSequenceClassification',
        'UniSpeechModel',
        'UniSpeechPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
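# The lazy-module indirection keeps the package import cheap: the torch-backed
# modeling classes listed above are only imported when one of them is first accessed.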
| 671 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet embeddings (stem): an aggressive 7x7 convolution followed by max pooling.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size and, if needed,
    to downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer composed of three convolutions; the first `1x1` convolution
    reduces the channels by a factor of `reduction`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading
    pretrained models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type from the label dtype / label count if not set.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
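# Minimal usage sketch (standard public checkpoint; `image` is any PIL image):
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     logits = model(**processor(images=image, return_tensors="pt")).logits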
| 702 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    # intentionally shadows the builtin to mirror the Dataset method name
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    # intentionally shadows the builtin to mirror the Dataset method name
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
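# Each stored value is a wall-clock duration (seconds) as measured by the
# `get_duration` decorator; the dict is written as JSON under the local results/ directory.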
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 671 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
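# Small factory producing RegNet configs and random pixel inputs for the tests below.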
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 703 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str):
        """Compute the common substring of the node's prefix and a word.

        Returns (common substring, remaining prefix, remaining word), e.g.
        RadixNode("myprefix").match("mystring") == ("my", "prefix", "string")
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words) -> None:
        for word in words:
            self.insert(word)
    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True
    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()
def main() -> None:
    """Demonstrate the tree on a small word list."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
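# Each operation walks at most len(word) characters of the tree, so find/insert/delete
# are linear in the word length (plus constant-time child lookups by first character).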
| 671 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
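# Each [parlai_name, hf_name] pair maps a ParlAI weight-name fragment to its Hugging Face equivalent.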
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI Blenderbot checkpoint, remap its weight names and save it in HF format."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
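# Usage sketch (paths illustrative):
#     python convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py \
#         --src_path blenderbot-model.bin --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json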
| 704 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
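# MgpstrProcessor bundles a ViT image processor with MGP-STR's character-level tokenizer.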
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_proc(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
| 671 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
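# Converts original (Apple ml-cvnets) MobileViT checkpoints into the Hugging Face format.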
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key( name , base_model=False ):
    '''simple docstring'''
    for i in range(1 , 6 ):
        if F"""layer_{i}.""" in name:
            name = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
    if "conv_1." in name:
        name = name.replace('''conv_1.''' , '''conv_stem.''' )
    if ".block." in name:
        name = name.replace('''.block.''' , '''.''' )
    if "exp_1x1" in name:
        name = name.replace('''exp_1x1''' , '''expand_1x1''' )
    if "red_1x1" in name:
        name = name.replace('''red_1x1''' , '''reduce_1x1''' )
    if ".local_rep.conv_3x3." in name:
        name = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
    if ".local_rep.conv_1x1." in name:
        name = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
    if ".norm." in name:
        name = name.replace('''.norm.''' , '''.normalization.''' )
    if ".conv." in name:
        name = name.replace('''.conv.''' , '''.convolution.''' )
    if ".conv_proj." in name:
        name = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
                if "expand_1x1" in name:
                    name = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
                if "conv_3x3" in name:
                    name = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
                if "reduce_1x1" in name:
                    name = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
    for i in range(2 , 5 ):
        if F""".global_rep.{i}.weight""" in name:
            name = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
        if F""".global_rep.{i}.bias""" in name:
            name = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
    if ".global_rep." in name:
        name = name.replace('''.global_rep.''' , '''.transformer.''' )
    if ".pre_norm_mha.0." in name:
        name = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
    if ".pre_norm_ffn.0." in name:
        name = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
    if ".pre_norm_ffn.1." in name:
        name = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
    if ".pre_norm_ffn.4." in name:
        name = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
    if ".transformer." in name:
        name = name.replace('''.transformer.''' , '''.transformer.layer.''' )
    if ".aspp_layer." in name:
        name = name.replace('''.aspp_layer.''' , '''.''' )
    if ".aspp_pool." in name:
        name = name.replace('''.aspp_pool.''' , '''.''' )
    if "seg_head." in name:
        name = name.replace('''seg_head.''' , '''segmentation_head.''' )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
    if "classifier.fc." in name:
        name = name.replace('''classifier.fc.''' , '''classifier.''' )
    elif (not base_model) and ("segmentation_head." not in name):
        name = '''mobilevit.''' + name
    return name
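# Remap every tensor of the original state dict in place; fused qkv
# projections are split into separate query/key/value tensors whose size is
# read off the corresponding HF attention module.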
def convert_state_dict( orig_state_dict , model , base_model=False ):
    '''simple docstring'''
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + '''query.weight'''] = val[:dim, :]
                orig_state_dict[prefix + '''key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[prefix + '''value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[prefix + '''query.bias'''] = val[:dim]
                orig_state_dict[prefix + '''key.bias'''] = val[dim : dim * 2]
                orig_state_dict[prefix + '''value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img( ):
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
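# End-to-end conversion: build the config, remap the weights, sanity-check a
# forward pass against hard-coded reference logits, then save and optionally
# push to the hub.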
@torch.no_grad()
def convert_movilevit_checkpoint( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
    # load 🤗 model
    if mobilevit_name.startswith('''deeplabv3_''' ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
    if mobilevit_name.startswith('''deeplabv3_''' ):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1E-4 )
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241] )
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587] )
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653] )
        else:
            raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization='''apple''' )
        model.push_to_hub(model_name , organization='''apple''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
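# Example invocation (paths and script filename are hypothetical):
#   python convert_mlcvnets_to_pytorch.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./dump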
def _modexpt( base , exponent , modulo_value ):
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value


def solution( base = 1777 , height = 1855 , digits = 8 ):
    '''simple docstring'''
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
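# Quick cross-check against Python's built-in modular pow (the two should agree):
#   _modexpt(3, 5, 100) == pow(3, 5, 100) == 43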
if __name__ == "__main__":
print(F"{solution() = }")
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint( ABC ):
    """simple docstring"""

    def __init__( self ):
        '''simple docstring'''
        self.test()

    def test( self ):
        '''simple docstring'''
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            stepped , completed , reset = self.update(advance )
            counter += 1
            if counter > 10_000:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )

    @abstractmethod
    def advance( self ):
        '''simple docstring'''
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )

    @abstractmethod
    def does_advance( self , token_id: int ):
        '''simple docstring'''
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )

    @abstractmethod
    def update( self , token_id: int ):
        '''simple docstring'''
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )

    @abstractmethod
    def reset( self ):
        '''simple docstring'''
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )

    @abstractmethod
    def remaining( self ):
        '''simple docstring'''
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )

    @abstractmethod
    def copy( self , stateful=False ):
        '''simple docstring'''
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
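# Forces one fixed token sequence to appear in the generated output; progress
# is tracked one token at a time through does_advance()/update().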
class PhrasalConstraint( Constraint ):
    """simple docstring"""

    def __init__( self , token_ids: List[int] ):
        '''simple docstring'''
        super(Constraint, self ).__init__()
        if not isinstance(token_ids , list ) or len(token_ids ) == 0:
            raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
        if any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance( self ):
        '''simple docstring'''
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance( self , token_id: int ):
        '''simple docstring'''
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}""" )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update( self , token_id: int ):
        '''simple docstring'''
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}""" )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset( self ):
        '''simple docstring'''
        self.completed = False
        self.fulfilled_idx = 0

    def remaining( self ):
        '''simple docstring'''
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy( self , stateful=False ):
        '''simple docstring'''
        new_constraint = PhrasalConstraint(self.token_ids )
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """simple docstring"""

    def __init__( self , nested_token_ids: List[List[int]] , no_subsets=True ):
        '''simple docstring'''
        self.max_height = max([len(one ) for one in nested_token_ids] )
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root , nested_token_ids ):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                F""" {nested_token_ids}.""" )
        self.trie = root

    def next_tokens( self , current_seq ):
        '''simple docstring'''
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys() )
        return next_tokens

    def reached_leaf( self , current_seq ):
        '''simple docstring'''
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0

    def count_leaves( self , root ):
        '''simple docstring'''
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )

    def has_subsets( self , trie , nested_token_ids ):
        '''simple docstring'''
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
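# Satisfied as soon as any one of several candidate token sequences has been
# generated; the trie above supplies the legal next tokens for a given prefix.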
class DisjunctiveConstraint( Constraint ):
    """simple docstring"""

    def __init__( self , nested_token_ids: List[List[int]] ):
        '''simple docstring'''
        super(Constraint, self ).__init__()
        if not isinstance(nested_token_ids , list ) or len(nested_token_ids ) == 0:
            raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
        if any(not isinstance(token_ids , list ) for token_ids in nested_token_ids ):
            raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
        if any(
            any((not isinstance(token_id , int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance( self ):
        '''simple docstring'''
        token_list = self.trie.next_tokens(self.current_seq )
        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def does_advance( self , token_id: int ):
        '''simple docstring'''
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
        next_tokens = self.trie.next_tokens(self.current_seq )
        return token_id in next_tokens

    def update( self , token_id: int ):
        '''simple docstring'''
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}""" )
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed
        return stepped, completed, reset

    def reset( self ):
        '''simple docstring'''
        self.completed = False
        self.current_seq = []

    def remaining( self ):
        '''simple docstring'''
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )

    def copy( self , stateful=False ):
        '''simple docstring'''
        new_constraint = DisjunctiveConstraint(self.token_ids )
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """simple docstring"""

    def __init__( self , constraints: List[Constraint] ):
        '''simple docstring'''
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False
        self.init_state()

    def init_state( self ):
        '''simple docstring'''
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]

    def get_bank( self ):
        '''simple docstring'''
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints ) * self.max_seqlen) + add

    def advance( self ):
        '''simple docstring'''
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance , int ):
                    token_list.append(advance )
                elif isinstance(advance , list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance , int ):
                token_list.append(advance )
            elif isinstance(advance , list ):
                token_list.extend(advance )
        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def reset( self , token_ids: Optional[List[int]] ):
        '''simple docstring'''
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete , stepped = self.add(token )
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add( self , token_id: int ):
        '''simple docstring'''
        if not isinstance(token_id , int ):
            raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
        complete , stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped , complete , reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None
                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped , complete , reset = pending_constraint.update(token_id )
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy( self , stateful=True ):
        '''simple docstring'''
        new_state = ConstraintListState(self.constraints )  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
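# Minimal usage sketch (token ids are made up for illustration):
#   state = ConstraintListState([PhrasalConstraint([5, 9, 3])])
#   state.add(5)  # -> (complete=False, stepped=True)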
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , speech_model: WhisperForConditionalGeneration , speech_processor: WhisperProcessor , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
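    # Pipeline flow: Whisper transcribes `audio` into a text prompt, which then
    # drives a standard Stable Diffusion denoising loop.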
@torch.no_grad()
    def __call__( self , audio , sampling_rate=16_000 , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
'''simple docstring'''
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors='''pt''' , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=480_000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(callback_steps )}.""" )
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1 , num_images_per_prompt , 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt , seq_len , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''''''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    F""" {type(prompt )}.""" )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ''' the batch size of `prompt`.''' )
            else:
                uncond_tokens = negative_prompt
            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens , padding='''max_length''' , max_length=max_length , truncation=True , return_tensors='''pt''' , )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1 , num_images_per_prompt , 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape , generator=generator , device='''cpu''' , dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents = latents.to(self.device )
# set timesteps
        self.scheduler.set_timesteps(num_inference_steps )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['''eta'''] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , latents )
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest( SchedulerCommonTest ):
    """simple docstring"""

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1_100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**kwargs )
        return config
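    # The sweeps below vary one schedule hyperparameter at a time; the
    # full-loop tests further down compare sample statistics per device.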
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_betas( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def test_schedules( self ):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )

    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            residual = model(sample , t )
            output = scheduler.step(residual , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3

    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            residual = model(sample , t )
            output = scheduler.step(residual , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3

    def test_full_loop_device( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            residual = model(sample , t )
            output = scheduler.step(residual , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3

    def test_full_loop_device_karras_sigmas( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            residual = model(sample , t )
            output = scheduler.step(residual , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """simple docstring"""

    def __init__( self , size: int ):
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size

    def __getitem__( self , vertex: int ) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex] )

    @property
    def size( self ):
        '''simple docstring'''
        return self._size

    def add_edge( self , from_vertex: int , to_vertex: int , weight: int ):
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )

    def get_shortest_path( self , start_vertex: int , finish_vertex: int ) -> int:
        '''simple docstring'''
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )
        return distances[finish_vertex]
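# Example: edges 0 -(1)-> 1 -(0)-> 2 give a shortest 0 -> 2 distance of 1:
#   g = AdjacencyList(3); g.add_edge(0, 1, 1); g.add_edge(1, 2, 0)
#   g.get_shortest_path(0, 2)  # -> 1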
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A__ : List[str] = logging.get_logger(__name__)
A__ : int = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig( BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""

    model_type = """nat"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
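# Minimal usage sketch (argument values chosen only for illustration):
#   config = NatConfig(depths=[2, 2, 4], num_heads=[2, 4, 8], out_features=["stage3"])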
class SubArray:
    """simple docstring"""

    def __init__( self , arr ):
        '''simple docstring'''
        # we need a list not a string, so do something to change the type
        self.array = arr.split(''',''' )

    def solve_sub_array( self ):
        '''simple docstring'''
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]
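# Kadane-style DP: sum_value[i] is the best subarray sum ending at i, rear[i]
# the best seen so far; e.g. the input "1,-2,3,4" yields 7.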
if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """simple docstring"""

    model_class = VQModel
    main_input_name = """sample"""
@property
    def dummy_input( self , sizes=(32, 32) ):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
    def input_shape( self ):
'''simple docstring'''
return (3, 32, 32)
@property
    def output_shape( self ):
'''simple docstring'''
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def test_forward_signature( self ):
'''simple docstring'''
pass
    def test_training( self ):
'''simple docstring'''
pass
    def test_from_pretrained_hub( self ):
        '''simple docstring'''
        model , loading_info = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self ):
        '''simple docstring'''
        model = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1E-3 ) )
from itertools import count
def solution( min_block_length: int = 50 ) -> int:
    '''simple docstring'''
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
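# Project Euler 115: with min_block_length=50 the expected answer is 168.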
if __name__ == "__main__":
print(F"{solution() = }")
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_flax_xlm_roberta_base( self ):
        '''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        tokenizer = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
        text = '''The dog is cute and lives in the garden house'''
        input_ids = jnp.array([tokenizer.encode(text )] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids )['''last_hidden_state''']
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor( SequenceFeatureExtractor ):
    """simple docstring"""

    model_input_names = ["""input_features""", """is_longer"""]
    def __init__( self , feature_size=64 , sampling_rate=48_000 , hop_length=480 , max_length_s=10 , fft_window_size=1_024 , padding_value=0.0 , return_attention_mask=False , frequency_min: float = 0 , frequency_max: float = 14_000 , top_db: int = None , truncation: str = "fusion" , padding: str = "repeatpad" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm=None , mel_scale='''htk''' , )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=feature_size , min_frequency=frequency_min , max_frequency=frequency_max , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
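        # Two filter banks are kept on purpose: an HTK-scaled one for the
        # "fusion" features and a Slaney-scaled one for the rand_trunc path.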
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
    def _np_extract_fbank_features( self , waveform: np.array , mel_filters: Optional[np.array] = None ):
        '''simple docstring'''
        log_mel_spectrogram = spectrogram(
            waveform , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=mel_filters , log_mel='''dB''' , )
        return log_mel_spectrogram.T
    def _random_mel_fusion( self , mel , total_frames , chunk_frames ):
        '''simple docstring'''
        ranges = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion
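    # Long clips are truncated (random crop or the 4-channel "fusion" stack
    # above); short ones are padded by repetition before mel extraction.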
    def _get_input_mel( self , waveform: np.array , max_length , truncation , padding ):
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation: str = None , padding: Optional[str] = None , max_length: Optional[int] = None , sampling_rate: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
'''simple docstring'''
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float32 ):
            raw_speech = raw_speech.astype(np.float64 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
# convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , list ):
            input_mel = [np.asarray(feature , dtype=np.float64 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
return input_features
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
A__ : List[str] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
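# fmt: on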
class _UpperCAmelCase ( __UpperCAmelCase ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = ["""input_ids""", """attention_mask"""]
lowercase__ = []
lowercase__ = []
def __init__( self : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : int="<s>", lowerCamelCase : Optional[Any]="</s>", lowerCamelCase : List[Any]="</s>", lowerCamelCase : Optional[Any]="<s>", lowerCamelCase : int="<unk>", lowerCamelCase : str="<pad>", lowerCamelCase : List[Any]="<mask>", lowerCamelCase : int=None, lowerCamelCase : Tuple=None, lowerCamelCase : Optional[int]=None, lowerCamelCase : Tuple = None, lowerCamelCase : List[Any]=None, lowerCamelCase : Optional[int]=False, **lowerCamelCase : Optional[Any], ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ = AddedToken(UpperCAmelCase_, lstrip=UpperCAmelCase_, rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_, UpperCAmelCase_ ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase__ = legacy_behaviour
super().__init__(
bos_token=UpperCAmelCase_, eos_token=UpperCAmelCase_, unk_token=UpperCAmelCase_, sep_token=UpperCAmelCase_, cls_token=UpperCAmelCase_, pad_token=UpperCAmelCase_, mask_token=UpperCAmelCase_, tokenizer_file=UpperCAmelCase_, src_lang=UpperCAmelCase_, tgt_lang=UpperCAmelCase_, additional_special_tokens=UpperCAmelCase_, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=UpperCAmelCase_, **UpperCAmelCase_, )
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase_ ) )
lowercase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ = 1
lowercase__ = len(self.sp_model )
lowercase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCAmelCase_ )
}
lowercase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowercase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase__ = src_lang if src_lang is not None else '''eng_Latn'''
lowercase__ = self.lang_code_to_id[self._src_lang]
lowercase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowercase__ ( self : int ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowercase__ ( self : str, lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase__ ( self : Optional[Any], lowerCamelCase : str, lowerCamelCase : Any = None, lowerCamelCase : Optional[int] = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_, token_ids_a=UpperCAmelCase_, already_has_special_tokens=UpperCAmelCase_ )
lowercase__ = [1] * len(self.prefix_tokens )
lowercase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[Any], lowerCamelCase : List[str] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self : Tuple, lowerCamelCase : Any, lowerCamelCase : List[str] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : str, lowerCamelCase : str, lowerCamelCase : List[str], **lowerCamelCase : Optional[int] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase__ = src_lang
lowercase__ = self(UpperCAmelCase_, add_special_tokens=UpperCAmelCase_, return_tensors=UpperCAmelCase_, **UpperCAmelCase_ )
lowercase__ = self.convert_tokens_to_ids(UpperCAmelCase_ )
lowercase__ = tgt_lang_id
return inputs
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : List[str], lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.sp_model.encode(UpperCAmelCase_, out_type=UpperCAmelCase_ )
def lowercase__ ( self : List[Any], lowerCamelCase : Optional[int] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self : List[str], lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = ''''''.join(UpperCAmelCase_ ).replace(UpperCAmelCase_, ''' ''' ).strip()
return out_string
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Dict = None ):
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
UpperCAmelCase_, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_, '''wb''' ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def lowercase__ ( self : str, lowerCamelCase : Dict, lowerCamelCase : Optional[Any] = "eng_Latn", lowerCamelCase : str = None, lowerCamelCase : List[str] = "fra_Latn", **lowerCamelCase : Any, ):
'''simple docstring'''
lowercase__ = src_lang
lowercase__ = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_, UpperCAmelCase_, **UpperCAmelCase_ )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase__ ( self : Any, lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowercase__ = []
lowercase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__ = [self.cur_lang_code]
lowercase__ = [self.eos_token_id]
def lowercase__ ( self : int, lowerCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowercase__ = []
lowercase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__ = [self.cur_lang_code]
lowercase__ = [self.eos_token_id]
| 711 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
lowercase__ = 42
lowercase__ = None
lowercase__ = None
def a ( ):
'''simple docstring'''
lowercase__ = Node(1 )
lowercase__ = Node(2 )
lowercase__ = Node(3 )
lowercase__ = Node(4 )
lowercase__ = Node(5 )
return tree
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def a ( lowerCamelCase_ ):
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
if root is None:
return output
lowercase__ = deque([root] )
while process_queue:
lowercase__ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = []
def populate_output(lowerCamelCase_ , lowerCamelCase_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCamelCase_ , lowerCamelCase_ )
return output
def a ( lowerCamelCase_ ):
'''simple docstring'''
if root is None:
return []
lowercase__ = []
lowercase__ = 0
lowercase__ = height(lowerCamelCase_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 1
else:
output.append(get_nodes_from_right_to_left(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = 0
return output
def a ( ): # Main function for testing.
'''simple docstring'''
lowercase__ = make_tree()
print(F"""In-order Traversal: {inorder(lowerCamelCase_ )}""" )
print(F"""Pre-order Traversal: {preorder(lowerCamelCase_ )}""" )
print(F"""Post-order Traversal: {postorder(lowerCamelCase_ )}""" , '''\n''' )
print(F"""Height of Tree: {height(lowerCamelCase_ )}""" , '''\n''' )
print('''Complete Level Order Traversal: ''' )
print(level_order(lowerCamelCase_ ) , '''\n''' )
print('''Level-wise order Traversal: ''' )
for level in range(1 , height(lowerCamelCase_ ) + 1 ):
print(F"""Level {level}:""" , get_nodes_from_left_to_right(lowerCamelCase_ , level=lowerCamelCase_ ) )
print('''\nZigZag order Traversal: ''' )
print(zigzag(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 671 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Union[str, Any]=13, lowerCamelCase : List[str]=7, lowerCamelCase : List[str]=True, lowerCamelCase : int=True, lowerCamelCase : Dict=True, lowerCamelCase : str=True, lowerCamelCase : Dict=99, lowerCamelCase : Optional[int]=32, lowerCamelCase : str=5, lowerCamelCase : Optional[Any]=4, lowerCamelCase : int=37, lowerCamelCase : List[str]="gelu", lowerCamelCase : Union[str, Any]=0.1, lowerCamelCase : str=0.1, lowerCamelCase : str=512, lowerCamelCase : Union[str, Any]=16, lowerCamelCase : Tuple=2, lowerCamelCase : Union[str, Any]=0.02, lowerCamelCase : Tuple=4, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=UpperCamelCase__, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_and_inputs
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class _UpperCAmelCase ( UpperCamelCase_ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = True
lowercase__ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = FlaxRobertaModelTester(self )
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained('''roberta-base''', from_pt=UpperCamelCase__ )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase__ )
| 712 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = DistilBertTokenizer
lowercase__ = DistilBertTokenizerFast
lowercase__ = True
@slow
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
lowercase__ = tokenizer.encode('''sequence builders''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase, lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 0 |
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def a ( lowerCamelCase_=None ):
'''simple docstring'''
if subparsers is not None:
lowercase__ = subparsers.add_parser('''env''' )
else:
lowercase__ = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' , default=__UpperCamelCase , help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = torch.__version__
lowercase__ = torch.cuda.is_available()
lowercase__ = is_xpu_available()
lowercase__ = is_npu_available()
lowercase__ = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(__UpperCamelCase ):
lowercase__ = load_config_from_file(args.config_file ).to_dict()
lowercase__ = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
'''PyTorch XPU available''': str(__UpperCamelCase ),
'''PyTorch NPU available''': str(__UpperCamelCase ),
'''System RAM''': F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
}
if pt_cuda_available:
lowercase__ = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
lowercase__ = (
'''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(__UpperCamelCase , __UpperCamelCase )
else F"""\t{accelerate_config}"""
)
print(__UpperCamelCase )
lowercase__ = accelerate_config
return info
def a ( ):
'''simple docstring'''
lowercase__ = env_command_parser()
lowercase__ = parser.parse_args()
env_command(__UpperCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 713 |
from __future__ import annotations
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
if resistor <= 0:
lowercase__ = F"""Resistor at index {index} has a negative or zero value!"""
raise ValueError(lowerCamelCase_ )
first_sum += 1 / float(lowerCamelCase_ )
index += 1
return 1 / first_sum
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = 0.00
lowercase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
lowercase__ = F"""Resistor at index {index} has a negative value!"""
raise ValueError(lowerCamelCase_ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
A__ : Tuple = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class _UpperCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any], lowerCamelCase : Dict ):
'''simple docstring'''
super().__init__()
lowercase__ = torchvision.models.resnetaaa(pretrained=lowerCamelCase )
lowercase__ = list(model.children() )[:-2]
lowercase__ = nn.Sequential(*lowerCamelCase )
lowercase__ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowercase__ ( self : List[str], lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.pool(self.model(lowerCamelCase ) )
lowercase__ = torch.flatten(lowerCamelCase, start_dim=2 )
lowercase__ = out.transpose(1, 2 ).contiguous()
return out # BxNx2048
class _UpperCAmelCase ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = [json.loads(lowerCamelCase ) for l in open(lowerCamelCase )]
lowercase__ = os.path.dirname(lowerCamelCase )
lowercase__ = tokenizer
lowercase__ = labels
lowercase__ = len(lowerCamelCase )
lowercase__ = max_seq_length
lowercase__ = transforms
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : int, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''], add_special_tokens=lowerCamelCase ) )
lowercase__ = sentence[0], sentence[1:-1], sentence[-1]
lowercase__ = sentence[: self.max_seq_length]
lowercase__ = torch.zeros(self.n_classes )
lowercase__ = 1
lowercase__ = Image.open(os.path.join(self.data_dir, self.data[index]['''img'''] ) ).convert('''RGB''' )
lowercase__ = self.transforms(lowerCamelCase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = Counter()
for row in self.data:
label_freqs.update(row['''label'''] )
return label_freqs
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = [len(row['''sentence'''] ) for row in batch]
lowercase__ = len(lowerCamelCase_ ), max(lowerCamelCase_ )
lowercase__ = torch.zeros(lowerCamelCase_ , lowerCamelCase_ , dtype=torch.long )
lowercase__ = torch.zeros(lowerCamelCase_ , lowerCamelCase_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ ) ):
lowercase__ = input_row["""sentence"""]
lowercase__ = 1
lowercase__ = torch.stack([row['''image'''] for row in batch] )
lowercase__ = torch.stack([row['''label'''] for row in batch] )
lowercase__ = torch.stack([row['''image_start_token'''] for row in batch] )
lowercase__ = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def a ( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def a ( ):
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 714 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase__ = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('''RGB''' )
lowercase__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase__ = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def a ( lowerCamelCase_ ):
'''simple docstring'''
if "visual_encoder" in key:
lowercase__ = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , lowerCamelCase_ )
if "blocks" in key:
lowercase__ = re.sub(r'''blocks''' , '''layers''' , lowerCamelCase_ )
if "attn" in key:
lowercase__ = re.sub(r'''attn''' , '''self_attn''' , lowerCamelCase_ )
if "norm1" in key:
lowercase__ = re.sub(r'''norm1''' , '''layer_norm1''' , lowerCamelCase_ )
if "norm2" in key:
lowercase__ = re.sub(r'''norm2''' , '''layer_norm2''' , lowerCamelCase_ )
if "encoder.norm" in key:
lowercase__ = re.sub(r'''encoder.norm''' , '''post_layernorm''' , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
lowercase__ = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , lowerCamelCase_ )
if "encoder.pos_embed" in key:
lowercase__ = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , lowerCamelCase_ )
if "encoder.cls_token" in key:
lowercase__ = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , lowerCamelCase_ )
if "self_attn" in key:
lowercase__ = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , lowerCamelCase_ )
return key
@torch.no_grad()
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
if config_path is not None:
lowercase__ = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
lowercase__ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
lowercase__ = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase__ = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit='''base''' )
lowercase__ = pt_model.eval()
lowercase__ = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
hf_model.load_state_dict(lowerCamelCase_ )
lowercase__ = 384
lowercase__ = load_demo_image(image_size=lowerCamelCase_ , device='''cpu''' )
lowercase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase__ = tokenizer(['''a picture of'''] ).input_ids
lowercase__ = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase__ = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
vqa_model.eval()
lowercase__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
lowercase__ = ['''How many dogs are in this image?''']
lowercase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ).input_ids
lowercase__ = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase__ = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase__ = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit='''base''' )
itm_model.eval()
lowercase__ = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ = modified_state_dict.pop(lowerCamelCase_ )
lowercase__ = rename_key(lowerCamelCase_ )
lowercase__ = value
lowercase__ = BlipForImageTextRetrieval(lowerCamelCase_ )
lowercase__ = ['''A picture of a woman with a dog sitting in a beach''']
lowercase__ = tokenizer(
lowerCamelCase_ , return_tensors='''pt''' , padding='''max_length''' , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
lowercase__ = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
A__ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 671 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( lowercase__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = TransfoXLTokenizer
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
lowercase__ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Union[str, Any], **lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **__lowercase )
def lowercase__ ( self : Tuple, lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = '''<unk> UNwanted , running'''
lowercase__ = '''<unk> unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=__lowercase )
lowercase__ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(__lowercase, ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ), [0, 4, 8, 7] )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ), ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase__ ( self : str ):
'''simple docstring'''
lowercase__ = TransfoXLTokenizer(lower_case=__lowercase )
lowercase__ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
lowercase__ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(__lowercase ), __lowercase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ), __lowercase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = len(__lowercase )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''', 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowercase ), original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ), [1] )
self.assertEqual(tokenizer.decode([1] ), '''new1''' )
| 715 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, lowerCamelCase : Any, lowerCamelCase : Tuple=7, lowerCamelCase : str=3, lowerCamelCase : Tuple=18, lowerCamelCase : int=30, lowerCamelCase : Tuple=400, lowerCamelCase : Any=True, lowerCamelCase : Any=None, lowerCamelCase : List[str]=True, lowerCamelCase : Union[str, Any]=None, ):
'''simple docstring'''
lowercase__ = size if size is not None else {'''shortest_edge''': 20}
lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def lowercase__ ( self : Any ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCAmelCase ( A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = MobileNetVaImageProcessor if is_vision_available() else None
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCamelCase, '''crop_size''' ) )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Any ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
def lowercase__ ( self : str ):
'''simple docstring'''
# Initialize image_processing
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCamelCase, torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase, torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
lowercase__ = image_processing(lowerCamelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
| 671 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"""The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ ,__snake_case ,)
class _UpperCAmelCase ( __snake_case ):
"""simple docstring"""
lowercase__ = RobertaConfig
lowercase__ = 'roberta'
def __init__( self : Any, lowerCamelCase : Dict ):
'''simple docstring'''
super().__init__(A_ )
lowercase__ = RobertaEmbeddings(A_ )
self.init_weights()
@add_start_docstrings(
"""RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. """ ,__snake_case ,)
class _UpperCAmelCase ( __snake_case ):
"""simple docstring"""
lowercase__ = RobertaConfig
lowercase__ = 'roberta'
def __init__( self : str, lowerCamelCase : Tuple ):
'''simple docstring'''
super().__init__(A_ )
lowercase__ = config.num_labels
lowercase__ = config.num_hidden_layers
lowercase__ = DeeRobertaModel(A_ )
lowercase__ = nn.Dropout(config.hidden_dropout_prob )
lowercase__ = nn.Linear(config.hidden_size, self.config.num_labels )
@add_start_docstrings_to_model_forward(A_ )
def lowercase__ ( self : List[Any], lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : str=None, lowerCamelCase : List[Any]=None, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : Tuple=None, lowerCamelCase : Dict=-1, lowerCamelCase : List[Any]=False, ):
'''simple docstring'''
lowercase__ = self.num_layers
try:
lowercase__ = self.roberta(
A_, attention_mask=A_, token_type_ids=A_, position_ids=A_, head_mask=A_, inputs_embeds=A_, )
lowercase__ = outputs[1]
lowercase__ = self.dropout(A_ )
lowercase__ = self.classifier(A_ )
lowercase__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowercase__ = e.message
lowercase__ = e.exit_layer
lowercase__ = outputs[0]
if not self.training:
lowercase__ = entropy(A_ )
lowercase__ = []
lowercase__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowercase__ = MSELoss()
lowercase__ = loss_fct(logits.view(-1 ), labels.view(-1 ) )
else:
lowercase__ = CrossEntropyLoss()
lowercase__ = loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) )
# work with highway exits
lowercase__ = []
for highway_exit in outputs[-1]:
lowercase__ = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowercase__ = MSELoss()
lowercase__ = loss_fct(highway_logits.view(-1 ), labels.view(-1 ) )
else:
lowercase__ = CrossEntropyLoss()
lowercase__ = loss_fct(highway_logits.view(-1, self.num_labels ), labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
lowercase__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowercase__ = (loss,) + outputs
if not self.training:
lowercase__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowercase__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 716 |
import argparse
import os
import re
A__ : Optional[int] = 'src/transformers'
# Pattern that looks at the indentation in a line.
A__ : Union[str, Any] = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
A__ : List[str] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
A__ : List[Any] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
A__ : int = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
A__ : Tuple = re.compile(r'\[([^\]]+)\]')
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = _re_indent.search(lowerCamelCase_ )
return "" if search is None else search.groups()[0]
def a ( lowerCamelCase_ , lowerCamelCase_="" , lowerCamelCase_=None , lowerCamelCase_=None ):
'''simple docstring'''
lowercase__ = 0
lowercase__ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCamelCase_ ):
index += 1
lowercase__ = ['''\n'''.join(lines[:index] )]
else:
lowercase__ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ = [lines[index]]
index += 1
while index < len(lowerCamelCase_ ) and (end_prompt is None or not lines[index].startswith(lowerCamelCase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCamelCase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCamelCase_ ) )
if index < len(lowerCamelCase_ ) - 1:
lowercase__ = [lines[index + 1]]
index += 1
else:
lowercase__ = []
else:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
lowercase__ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCamelCase_ ) > 0:
blocks.append('''\n'''.join(lowerCamelCase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCamelCase_ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def a ( lowerCamelCase_ ):
'''simple docstring'''
def _inner(lowerCamelCase_ ):
return key(lowerCamelCase_ ).lower().replace('''_''' , '''''' )
return _inner
def a ( lowerCamelCase_ , lowerCamelCase_=None ):
'''simple docstring'''
# If no key is provided, we use a noop.
def noop(lowerCamelCase_ ):
return x
if key is None:
lowercase__ = noop
# Constants are all uppercase, they go first.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ = [obj for obj in objects if key(lowerCamelCase_ )[0].isupper() and not key(lowerCamelCase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ = [obj for obj in objects if not key(lowerCamelCase_ )[0].isupper()]
lowercase__ = ignore_underscore(lowerCamelCase_ )
return sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ ) + sorted(lowerCamelCase_ , key=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
# This inner function sort imports between [ ].
def _replace(lowerCamelCase_ ):
lowercase__ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] ) + "]"
lowercase__ = import_statement.split('''\n''' )
if len(lowerCamelCase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ = 2 if lines[1].strip() == '''[''' else 1
lowercase__ = [(i, _re_strip_line.search(lowerCamelCase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ = sort_objects(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )
lowercase__ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCamelCase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ = keys[:-1]
lowercase__ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowerCamelCase_ )] )
return "\n".join(lowerCamelCase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ = _re_bracket_content.sub(_replace , lowerCamelCase_ )
return import_statement
def a ( lowerCamelCase_ , lowerCamelCase_=True ):
'''simple docstring'''
with open(lowerCamelCase_ , encoding='''utf-8''' ) as f:
lowercase__ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase__ = split_code_in_indented_blocks(
lowerCamelCase_ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCamelCase_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase__ = main_blocks[block_idx]
lowercase__ = block.split('''\n''' )
# Get to the start of the imports.
lowercase__ = 0
while line_idx < len(lowerCamelCase_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase__ = len(lowerCamelCase_ )
else:
line_idx += 1
if line_idx >= len(lowerCamelCase_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase__ = '''\n'''.join(block_lines[line_idx:-1] )
lowercase__ = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowercase__ = split_code_in_indented_blocks(lowerCamelCase_ , indent_level=lowerCamelCase_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase__ = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase__ = [(pattern.search(lowerCamelCase_ ).groups()[0] if pattern.search(lowerCamelCase_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase__ = [(i, key) for i, key in enumerate(lowerCamelCase_ ) if key is not None]
lowercase__ = [x[0] for x in sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase__ = 0
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowercase__ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCamelCase_ )
count += 1
# And we put our main block back together with its first and last line.
lowercase__ = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCamelCase_ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCamelCase_ ) )
def a ( lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = []
for root, _, files in os.walk(lowerCamelCase_ ):
if "__init__.py" in files:
lowercase__ = sort_imports(os.path.join(lowerCamelCase_ , '''__init__.py''' ) , check_only=lowerCamelCase_ )
if result:
lowercase__ = [os.path.join(lowerCamelCase_ , '''__init__.py''' )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(F"""Would overwrite {len(lowerCamelCase_ )} files, run `make style`.""" )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
A__ : int = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 671 | 0 |
from __future__ import annotations
import math
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if len(lowerCamelCase_ ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase_ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
lowercase__ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase_ ) )
]
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase_ ) )
]
def a ( lowerCamelCase_ ):
'''simple docstring'''
if len(lowerCamelCase_ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
lowercase__ = len(lowerCamelCase_ )
lowercase__ = matrix_length // 2
lowercase__ = [[a[i][j] for j in range(lowerCamelCase_ , lowerCamelCase_ )] for i in range(lowerCamelCase_ )]
lowercase__ = [
[a[i][j] for j in range(lowerCamelCase_ , lowerCamelCase_ )] for i in range(lowerCamelCase_ , lowerCamelCase_ )
]
lowercase__ = [[a[i][j] for j in range(lowerCamelCase_ )] for i in range(lowerCamelCase_ )]
lowercase__ = [[a[i][j] for j in range(lowerCamelCase_ )] for i in range(lowerCamelCase_ , lowerCamelCase_ )]
return top_left, top_right, bot_left, bot_right
def a ( lowerCamelCase_ ):
'''simple docstring'''
return len(lowerCamelCase_ ), len(matrix[0] )
def a ( lowerCamelCase_ ):
'''simple docstring'''
print('''\n'''.join(str(lowerCamelCase_ ) for line in matrix ) )
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if matrix_dimensions(lowerCamelCase_ ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = split_matrix(lowerCamelCase_ )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = split_matrix(lowerCamelCase_ )
lowercase__ = actual_strassen(lowerCamelCase_ , matrix_subtraction(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = actual_strassen(matrix_addition(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
lowercase__ = actual_strassen(matrix_addition(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
lowercase__ = actual_strassen(lowerCamelCase_ , matrix_subtraction(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = actual_strassen(matrix_addition(lowerCamelCase_ , lowerCamelCase_ ) , matrix_addition(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = actual_strassen(matrix_subtraction(lowerCamelCase_ , lowerCamelCase_ ) , matrix_addition(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = actual_strassen(matrix_subtraction(lowerCamelCase_ , lowerCamelCase_ ) , matrix_addition(lowerCamelCase_ , lowerCamelCase_ ) )
lowercase__ = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ ) , lowerCamelCase_ )
lowercase__ = matrix_addition(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = matrix_addition(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ ) , lowerCamelCase_ )
# construct the new matrix from our 4 quadrants
lowercase__ = []
for i in range(len(lowerCamelCase_ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase_ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
if matrix_dimensions(lowerCamelCase_ )[1] != matrix_dimensions(lowerCamelCase_ )[0]:
lowercase__ = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(lowerCamelCase_ )
lowercase__ = matrix_dimensions(lowerCamelCase_ )
lowercase__ = matrix_dimensions(lowerCamelCase_ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
lowercase__ = max(*lowerCamelCase_ , *lowerCamelCase_ )
lowercase__ = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase_ ) ) ) )
lowercase__ = matrixa
lowercase__ = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , lowerCamelCase_ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase_ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase_ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
lowercase__ = actual_strassen(lowerCamelCase_ , lowerCamelCase_ )
# Removing the additional zeros
for i in range(0 , lowerCamelCase_ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase_ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
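# Sketch of the padding step above (hypothetical shapes): multiplying a 10x4
# matrix by a 4x4 one pads both to 16x16, the next power of two of the largest
# dimension, recurses, then strips the padding rows and columns at the end.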
if __name__ == "__main__":
A__ : Tuple = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
A__ : Any = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 717 |
from math import sqrt
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__ = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__ = False
for divisor in range(2 , int(round(sqrt(lowerCamelCase_ ) ) ) + 1 ):
# if 'number' is divisible by 'divisor', set 'status'
# to False and break out of the loop.
if number % divisor == 0:
lowercase__ = False
break
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'status' must been from type bool"
return status
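# e.g. is_prime(11) -> True, is_prime(12) -> False; trial division only needs
# to test divisors up to sqrt(number).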
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__ = list(range(2 , n + 1 ) )
lowercase__ = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(lowerCamelCase_ ) ):
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__ = 0
# filters actual prime numbers.
lowercase__ = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n > 2), "'N' must been an int and > 2"
lowercase__ = []
# iterates over all numbers from 2 up to N (inclusive);
# if a number is prime, it is appended to the list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCamelCase_ ):
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and number >= 0, "'number' must been an int and >= 0"
lowercase__ = [] # this list will be returned by the function.
# potential prime number factors.
lowercase__ = 2
lowercase__ = number
if number == 0 or number == 1:
ans.append(lowerCamelCase_ )
# if 'number' is not prime, build its prime factorization
elif not is_prime(lowerCamelCase_ ):
while quotient != 1:
if is_prime(lowerCamelCase_ ) and (quotient % factor == 0):
ans.append(lowerCamelCase_ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type list"
return ans
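# Trial-division factorization: while the quotient is not 1, divide out the
# smallest prime factor that divides it, otherwise try the next candidate.
# Note that `quotient /= factor` is true division, so the quotient becomes a
# float; the loop still terminates because it exits once the value reaches 1.0.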
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = max(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__ = 0
# prime factorization of 'number'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = min(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'ans' must been from type int"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 == 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCamelCase_ ), "compare must been from type bool"
return number % 2 != 0
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (number > 2) and is_even(lowerCamelCase_ )
), "'number' must been an int, even and > 2"
lowercase__ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__ = get_prime_numbers(lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
# run variable for while-loops.
lowercase__ = 0
lowercase__ = None
# exit flag used to break out of the loops
lowercase__ = True
while i < len_pn and loop:
lowercase__ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (len(lowerCamelCase_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
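# Goldbach decomposition: returns the first pair of primes summing to the
# (even) input, smallest first prime first; for 28 the intended result is
# [5, 23], since 28 - 2 and 28 - 3 are not prime but 28 - 5 = 23 is.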
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 0
while numbera != 0:
lowercase__ = numbera % numbera
lowercase__ = numbera
lowercase__ = rest
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
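# Euclidean algorithm: repeatedly replace the pair (a, b) with (b, a mod b)
# until the second value is 0; the surviving value is the greatest common
# divisor.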
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__ = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__ = prime_factorization(lowerCamelCase_ )
lowercase__ = prime_factorization(lowerCamelCase_ )
elif numbera == 1 or numbera == 1:
lowercase__ = []
lowercase__ = []
lowercase__ = max(lowerCamelCase_ , lowerCamelCase_ )
lowercase__ = 0
lowercase__ = 0
lowercase__ = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(max(lowerCamelCase_ , lowerCamelCase_ ) ):
ans *= n
else:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__ = prime_fac_a.count(lowerCamelCase_ )
for _ in range(lowerCamelCase_ ):
ans *= n
done.append(lowerCamelCase_ )
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
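# Least common multiple via prime factorizations ("kgV" is German for lcm):
# for each prime, take the higher multiplicity from either factorization,
# e.g. lcm(8, 10) = 2 * 2 * 2 * 5 = 40.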
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'number' must been a positive int"
lowercase__ = 0
lowercase__ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCamelCase_ ):
ans += 1
# precondition
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and is_prime(
lowerCamelCase_ ), "'ans' must been a prime number and from type int"
return ans
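# Zero-indexed n-th prime found by counting upward with trial division; the
# intended behavior is get_prime(0) -> 2 and get_prime(4) -> 11.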
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
is_prime(lowerCamelCase_ ) and is_prime(lowerCamelCase_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__ = p_number_a + 1 # jump to the next number
lowercase__ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
while number < p_number_a:
ans.append(lowerCamelCase_ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCamelCase_ ):
number += 1
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and ans[0] != p_number_a
and ans[len(lowerCamelCase_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 1), "'n' must been int and >= 1"
lowercase__ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCamelCase_ )
# precondition
assert ans[0] == 1 and ans[len(lowerCamelCase_ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__ = get_divisors(lowerCamelCase_ )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (divisors[0] == 1)
and (divisors[len(lowerCamelCase_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# sums all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
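# A number is perfect when it equals the sum of its proper divisors,
# e.g. 6 (1 + 2 + 3) and 28 (1 + 2 + 4 + 7 + 14).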
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__ = gcd(abs(lowerCamelCase_ ) , abs(lowerCamelCase_ ) )
# precondition
assert (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
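# Reduces a fraction by the gcd of its numerator and denominator; the
# intended reduction of (10, 20) is (1, 2).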
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__ = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a ( lowerCamelCase_ ):
'''simple docstring'''
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__ = 0
lowercase__ = 1
lowercase__ = 1 # this will be returned
for _ in range(n - 1 ):
lowercase__ = ans
ans += fiba
lowercase__ = tmp
return ans
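# Iterative Fibonacci with two rolling values; this avoids the exponential
# blow-up of the naive recursive definition.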
| 671 | 0 |
'''simple docstring'''
A__ : Dict = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 718 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = args.log_outputs
lowercase__ = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
# load metric
lowercase__ = load_metric('''wer''' )
lowercase__ = load_metric('''cer''' )
# compute metrics
lowercase__ = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
lowercase__ = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
# print & log results
lowercase__ = F"""WER: {wer_result}\nCER: {cer_result}"""
print(lowerCamelCase_ )
with open(F"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
f.write(lowerCamelCase_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowercase__ = F"""log_{dataset_id}_predictions.txt"""
lowercase__ = F"""log_{dataset_id}_targets.txt"""
with open(lowerCamelCase_ , '''w''' ) as p, open(lowerCamelCase_ , '''w''' ) as t:
# mapping function to write output
def write_to_file(lowerCamelCase_ , lowerCamelCase_ ):
p.write(F"""{i}""" + '''\n''' )
p.write(batch['''prediction'''] + '''\n''' )
t.write(F"""{i}""" + '''\n''' )
t.write(batch['''target'''] + '''\n''' )
result.map(lowerCamelCase_ , with_indices=lowerCamelCase_ )
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowercase__ = re.sub(lowerCamelCase_ , '''''' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowercase__ = ['''\n\n''', '''\n''', ''' ''', ''' ''']
for t in token_sequences_to_ignore:
lowercase__ = ''' '''.join(text.split(lowerCamelCase_ ) )
return text
def a ( lowerCamelCase_ ):
'''simple docstring'''
# load dataset
lowercase__ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=lowerCamelCase_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowercase__ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowercase__ = feature_extractor.sampling_rate
# resample audio
lowercase__ = dataset.cast_column('''audio''' , Audio(sampling_rate=lowerCamelCase_ ) )
# load eval pipeline
if args.device is None:
lowercase__ = 0 if torch.cuda.is_available() else -1
lowercase__ = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(lowerCamelCase_ ):
lowercase__ = asr(
batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowercase__ = prediction['''text''']
lowercase__ = normalize_text(batch['''sentence'''] )
return batch
# run inference on all examples
lowercase__ = dataset.map(lowerCamelCase_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
A__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to None (no chunking).'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks in seconds. Defaults to None.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
A__ : Union[str, Any] = parser.parse_args()
main(args)
| 671 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def a ( lowerCamelCase_ ):
'''simple docstring'''
for param in module.parameters():
lowercase__ = False
def a ( ):
'''simple docstring'''
lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def a ( lowerCamelCase_ ):
'''simple docstring'''
lowercase__ = plt.imshow(lowercase__ )
fig.axes.get_xaxis().set_visible(lowercase__ )
fig.axes.get_yaxis().set_visible(lowercase__ )
plt.show()
def a ( ):
'''simple docstring'''
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('''%H:%M:%S''' )
return timestamp
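# get_timestamp returns the current wall-clock time formatted as 'HH:MM:SS',
# e.g. '14:03:27'.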
| 719 |
from functools import reduce
A__ : Union[str, Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( lowerCamelCase_ = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda lowerCamelCase_ , lowerCamelCase_ : str(int(lowerCamelCase_ ) * int(lowerCamelCase_ ) ) , n[i : i + 13] ) )
for i in range(len(lowerCamelCase_ ) - 12 ) )
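# Slides a window of 13 adjacent digits across the string, folds each window
# into its digit product with reduce, and keeps the maximum over all windows.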
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
lowercase__ = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
lowercase__ = """The dog is cute and lives in the garden house"""
lowercase__ = jnp.array([tokenizer.encode(__a )] )
lowercase__ = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
lowercase__ = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
lowercase__ = model(__a )["""last_hidden_state"""]
self.assertEqual(output.shape, __a )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1], __a, atol=1E-3 ) )
| 720 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = 42
class _UpperCAmelCase ( A__ ,A__ ):
"""simple docstring"""
lowercase__ = 1
@register_to_config
def __init__( self : Union[str, Any], lowerCamelCase : int = 2_000, lowerCamelCase : float = 0.15, lowerCamelCase : float = 0.01, lowerCamelCase : float = 1348.0, lowerCamelCase : float = 1E-5, lowerCamelCase : int = 1, ):
'''simple docstring'''
# standard deviation of the initial noise distribution
lowercase__ = sigma_max
# setable values
lowercase__ = None
self.set_sigmas(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def lowercase__ ( self : Dict, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowercase__ = torch.linspace(1, lowerCamelCase, lowerCamelCase, device=lowerCamelCase )
def lowercase__ ( self : str, lowerCamelCase : int, lowerCamelCase : float = None, lowerCamelCase : float = None, lowerCamelCase : float = None ):
'''simple docstring'''
lowercase__ = sigma_min if sigma_min is not None else self.config.sigma_min
lowercase__ = sigma_max if sigma_max is not None else self.config.sigma_max
lowercase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCamelCase, lowerCamelCase )
lowercase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowercase__ = torch.exp(torch.linspace(math.log(lowerCamelCase ), math.log(lowerCamelCase ), lowerCamelCase ) )
lowercase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowercase__ ( self : Optional[int], lowerCamelCase : str, lowerCamelCase : str ):
'''simple docstring'''
return torch.where(
timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )
def lowercase__ ( self : Tuple, lowerCamelCase : torch.FloatTensor, lowerCamelCase : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowercase__ = timestep * torch.ones(
sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowercase__ = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowercase__ = timesteps.to(self.discrete_sigmas.device )
lowercase__ = self.discrete_sigmas[timesteps].to(sample.device )
lowercase__ = self.get_adjacent_sigma(lowerCamelCase, lowerCamelCase ).to(sample.device )
lowercase__ = torch.zeros_like(lowerCamelCase )
lowercase__ = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowercase__ = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowercase__ = diffusion.unsqueeze(-1 )
lowercase__ = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowercase__ = randn_tensor(
sample.shape, layout=sample.layout, generator=lowerCamelCase, device=sample.device, dtype=sample.dtype )
lowercase__ = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowercase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCamelCase, prev_sample_mean=lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : Optional[torch.Generator] = None, lowerCamelCase : bool = True, ):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowercase__ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCamelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowercase__ = torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
lowercase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowercase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowercase__ = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowercase__ = step_size.unsqueeze(-1 )
lowercase__ = sample + step_size * model_output
lowercase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase )
def lowercase__ ( self : List[str], lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, lowerCamelCase : torch.FloatTensor, ):
'''simple docstring'''
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowercase__ = timesteps.to(original_samples.device )
lowercase__ = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowercase__ = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCamelCase ) * sigmas[:, None, None, None]
)
lowercase__ = noise + original_samples
return noisy_samples
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
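# Usage sketch (assumption, mirroring diffusers' ScoreSdeVePipeline): sampling
# starts from sigma_max * Gaussian noise and, at every timestep, alternates
# the Langevin corrector (step_correct) with the reverse-SDE predictor
# (step_pred) until the sample is denoised.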
| 671 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
A__ : Dict = 20_48
A__ : Dict = 40_96
A__ : List[Any] = 42
A__ : List[str] = os.environ.pop('PROCESS_TRAIN', 'false')
A__ : Optional[int] = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def a ( lowerCamelCase_ ):
'''simple docstring'''
def choose_first(lowerCamelCase_ , lowerCamelCase_=False ):
assert isinstance(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) == 1:
lowercase__ = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
lowercase__ = {k: [a[k]] for k in a}
if len(a['''start_token'''] ) > 0:
break
return a
lowercase__ = {'''id''': example['''id''']}
lowercase__ = example['''annotations''']
lowercase__ = annotation['''yes_no_answer''']
if 0 in yes_no_answer or 1 in yes_no_answer:
lowercase__ = ['''yes'''] if 1 in yes_no_answer else ['''no''']
lowercase__ = lowercase__ = []
lowercase__ = lowercase__ = []
lowercase__ = ['''<cls>''']
else:
lowercase__ = ['''short''']
lowercase__ = choose_first(annotation['''short_answers'''] )
if len(out['''start_token'''] ) == 0:
# answer will be long if short is not available
lowercase__ = ['''long''']
lowercase__ = choose_first(annotation['''long_answer'''] , is_long_answer=_UpperCamelCase )
lowercase__ = []
answer.update(_UpperCamelCase )
# disregard some samples
if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
lowercase__ = True
else:
lowercase__ = False
lowercase__ = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
if not all(isinstance(answer[k] , _UpperCamelCase ) for k in cols ):
raise ValueError('''Issue in ID''' , example['''id'''] )
return answer
def a ( lowerCamelCase_ , lowerCamelCase_=False ):
'''simple docstring'''
lowercase__ = _get_single_answer(_UpperCamelCase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase__ = example['''document''']['''tokens''']
lowercase__ = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, this helps in removing all no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowercase__ = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
lowercase__ = example['''document''']['''tokens''']
lowercase__ = answer['''start_token''']
lowercase__ = answer['''end_token''']
lowercase__ = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowercase__ = ''' '''.join(context[start_token:end_token] )
# checking above code
if assertion:
lowercase__ = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
lowercase__ = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
lowercase__ = ''' '''.join([old[i] for i in range(len(_UpperCamelCase ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , _UpperCamelCase , end='''\n''' )
print('''Old:''' , _UpperCamelCase , end='''\n\n''' )
return {
"context": " ".join(_UpperCamelCase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=2048 , lowerCamelCase_=4096 , lowerCamelCase_=True ):
'''simple docstring'''
lowercase__ = get_context_and_ans(_UpperCamelCase , assertion=_UpperCamelCase )
lowercase__ = out['''answer''']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowercase__ = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
lowercase__ = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase__ = []
lowercase__ = []
lowercase__ = input_ids[:q_len]
lowercase__ = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
for i in doc_start_indices:
lowercase__ = i + max_length - q_len
lowercase__ = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(_UpperCamelCase ),
"end_token": [-100] * len(_UpperCamelCase ),
"category": category,
},
}
lowercase__ = out['''context'''].split()
lowercase__ = splitted_context[answer['''end_token''']]
lowercase__ = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=_UpperCamelCase , ).input_ids )
lowercase__ = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=_UpperCamelCase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowercase__ = len(tokenizer(_UpperCamelCase , add_special_tokens=_UpperCamelCase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowercase__ = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
lowercase__ = answer['''start_token''']
lowercase__ = answer['''end_token''']
if assertion:
lowercase__ = tokenizer.decode(_UpperCamelCase )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , _UpperCamelCase , end='''\n\n''' )
if len(_UpperCamelCase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowercase__ = input_ids[:q_len]
lowercase__ = range(_UpperCamelCase , len(_UpperCamelCase ) , max_length - doc_stride )
lowercase__ = []
lowercase__ = []
lowercase__ = []
lowercase__ = [] # null, yes, no, long, short
for i in doc_start_indices:
lowercase__ = i + max_length - q_len
lowercase__ = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowercase__ = start_token - i + q_len
lowercase__ = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
lowercase__ = -100
lowercase__ = -100
answers_category.append('''null''' )
lowercase__ = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_UpperCamelCase )
answers_end_token.append(_UpperCamelCase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(_UpperCamelCase ) )
print('''Old:''' , tokenizer.decode(_UpperCamelCase ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
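# The doc-stride windows above start every (max_length - doc_stride) tokens,
# so a long context yields several overlapping training features; windows
# that miss the answer span get -100 start/end labels and category "null".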
def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=2048 , lowerCamelCase_=4096 , lowerCamelCase_=False ):
'''simple docstring'''
lowercase__ = get_strided_contexts_and_ans(
_UpperCamelCase , _UpperCamelCase , doc_stride=_UpperCamelCase , max_length=_UpperCamelCase , assertion=_UpperCamelCase , )
return example
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
with jsonlines.open(_UpperCamelCase , '''a''' ) as writer:
for example in tqdm(_UpperCamelCase , total=len(_UpperCamelCase ) , desc='''Saving samples ... ''' ):
lowercase__ = example['''labels''']
for ids, start, end, cat in zip(
example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
if start == -1 and end == -1:
continue # skip wasted samples that have no answer
if cat == "null" and np.random.rand() < 0.6:
continue # randomly drop ~60 % of 'null' samples
writer.write(
{
'''input_ids''': ids,
'''start_token''': start,
'''end_token''': end,
'''category''': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
A__ : List[Any] = load_dataset('natural_questions')
A__ : Any = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
A__ : Dict = data["train" if PROCESS_TRAIN == "true" else "validation"]
A__ : List[Any] = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
A__ : List[str] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
A__ : Optional[Any] = data.remove_columns(['annotations', 'document', 'id', 'question'])
print(data)
np.random.seed(SEED)
A__ : Optional[int] = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 721 |
from collections import defaultdict
from math import gcd
def a ( lowerCamelCase_ = 150_0000 ):
'''simple docstring'''
lowercase__ = defaultdict(lowerCamelCase_ )
lowercase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , lowerCamelCase_ , 2 ):
if gcd(lowerCamelCase_ , lowerCamelCase_ ) > 1:
continue
lowercase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(lowerCamelCase_ , limit + 1 , lowerCamelCase_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
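# Euclid's formula with coprime m > n of opposite parity generates each
# primitive Pythagorean triple exactly once; counting every multiple of each
# primitive perimeter then identifies perimeter values realised by exactly
# one right triangle.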
if __name__ == "__main__":
print(F"{solution() = }")
| 671 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ : int = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Any = ['ConditionalDetrFeatureExtractor']
A__ : Optional[int] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
A__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
A__ : Dict = logging.get_logger(__name__)
A__ : Dict = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A__ : Optional[int] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
A__ : List[str] = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
A__ : Optional[int] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_INIT_CONFIGURATION
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BertTokenizer
def __init__( self : Any, lowerCamelCase : Optional[Any]=None, lowerCamelCase : Any=None, lowerCamelCase : Tuple=True, lowerCamelCase : Dict="[UNK]", lowerCamelCase : Any="[SEP]", lowerCamelCase : List[Any]="[PAD]", lowerCamelCase : Optional[Any]="[CLS]", lowerCamelCase : Dict="[MASK]", lowerCamelCase : List[Any]=True, lowerCamelCase : Tuple=None, **lowerCamelCase : Dict, ):
'''simple docstring'''
super().__init__(
lowerCamelCase, tokenizer_file=lowerCamelCase, do_lower_case=lowerCamelCase, unk_token=lowerCamelCase, sep_token=lowerCamelCase, pad_token=lowerCamelCase, cls_token=lowerCamelCase, mask_token=lowerCamelCase, tokenize_chinese_chars=lowerCamelCase, strip_accents=lowerCamelCase, **lowerCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''', lowerCamelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''', lowerCamelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''', lowerCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(lowerCamelCase, normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**lowerCamelCase )
lowercase__ = do_lower_case
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : Dict=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : List[Any], lowerCamelCase : List[int], lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(lowerCamelCase, name=lowerCamelCase )
return tuple(lowerCamelCase )
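# Minimal usage sketch (assumes hub access; the ids shown are illustrative):
# tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
# tok("hello world")["input_ids"]  # e.g. [101, 7592, 2088, 102]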
| 671 | 0 |
from PIL import Image
def a ( lowerCamelCase_ , lowerCamelCase_ ):
'''simple docstring'''
def brightness(lowerCamelCase_ ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
return img.point(_SCREAMING_SNAKE_CASE )
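# Image.point applies the mapping to every pixel value; the formula above
# simplifies to c + level, and PIL clips the result to the 0..255 byte range.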
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
A__ : Any = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 701 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A__ : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 671 | 0 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any]=2, lowerCamelCase : str=8, lowerCamelCase : int=True, lowerCamelCase : List[str]=True, lowerCamelCase : List[Any]=True, lowerCamelCase : str=True, lowerCamelCase : int=99, lowerCamelCase : str=16, lowerCamelCase : int=5, lowerCamelCase : Dict=2, lowerCamelCase : str=36, lowerCamelCase : int="gelu", lowerCamelCase : Any=0.0, lowerCamelCase : Union[str, Any]=0.0, lowerCamelCase : List[str]=512, lowerCamelCase : Union[str, Any]=16, lowerCamelCase : List[str]=2, lowerCamelCase : Any=0.02, lowerCamelCase : Tuple=3, lowerCamelCase : List[str]=4, lowerCamelCase : List[Any]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__UpperCamelCase, initializer_range=self.initializer_range, )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.get_config()
lowercase__ = 300
return config
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin, unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip(reason='''MRA does not output attentions''' )
    def test_attention_outputs( self ):
        '''simple docstring'''
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )

    @slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )

    @slow
    def test_inference_masked_lm_long_input( self ):
        '''simple docstring'''
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
        input_ids = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )
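# How to run these tests (illustrative; the file path follows the usual
# transformers test layout and is an assumption):
#   python -m pytest tests/models/mra/test_modeling_mra.py -k "not slow"
# The @slow integration tests above additionally download the uw-madison/mra
# checkpoints and compare outputs against the hard-coded logit slices.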
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map(dataset: datasets.Dataset, **kwargs ):  # intentionally shadows the builtin inside this benchmark module
    '''simple docstring'''
    _ = dataset.map(**kwargs )


@get_duration
def filter(dataset: datasets.Dataset, **kwargs ):  # intentionally shadows the builtin inside this benchmark module
    '''simple docstring'''
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    '''simple docstring'''
    times = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )

        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )

        def tokenize(examples ):
            return tokenizer(examples['''text'''] )

        # the timing labels below are descriptive; the exact keys used by the
        # original benchmark may differ slightly
        times['''map identity'''] = map(dataset )
        times['''map identity batched'''] = map(dataset , batched=True )
        times['''map no-op batched'''] = map(dataset , function=lambda x : None , batched=True )

        with dataset.formatted_as(type='''numpy''' ):
            times['''map no-op batched numpy'''] = map(dataset , function=lambda x : None , batched=True )

        with dataset.formatted_as(type='''pandas''' ):
            times['''map no-op batched pandas'''] = map(dataset , function=lambda x : None , batched=True )

        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['''map no-op batched pytorch'''] = map(dataset , function=lambda x : None , batched=True )

        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['''map no-op batched tensorflow'''] = map(dataset , function=lambda x : None , batched=True )

        times['''map fast-tokenizer batched'''] = map(dataset , function=tokenize , batched=True )

        times['''filter'''] = filter(dataset )

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
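# Output sketch (illustrative values): the JSON written above maps benchmark
# labels to durations in seconds as measured by the @get_duration decorator,
# e.g. {"num examples": 500000, "map identity": 12.3, "filter": 4.5, ...}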
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg , hint=None ):
    '''simple docstring'''
    require_version(deps[pkg] , hint )
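# Usage sketch (hypothetical call, mirroring how transformers guards optional
# dependencies at import time):
#   dep_version_check("tokenizers")  # raises if the installed version violates
#                                    # the pin in dependency_versions_table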
class RadixNode:
    """simple docstring"""

    def __init__( self, prefix: str = "", is_leaf: bool = False ):
        '''simple docstring'''
        # Mapping from the first character of the prefix of the node
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match( self, word: str ):
        '''simple docstring'''
        x = 0
        for q, w in zip(self.prefix, word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self, words: list[str] ):
        '''simple docstring'''
        for word in words:
            self.insert(word )
    def insert( self, word: str ):
        '''simple docstring'''
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
    def find( self, word: str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word )
    def delete( self, word: str ):
        '''simple docstring'''
        incoming_node = self.nodes.get(word[0], None )
        if not incoming_node:
            return False
        else:
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values() )[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values() )[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                return True
    def print_tree( self, height: int = 0 ):
        '''simple docstring'''
        if self.prefix != "":
            print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
        for value in self.nodes.values():
            value.print_tree(height + 1 )
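# Illustrative sketch (not part of the original class): how `match` splits a
# node prefix against a word. For a node with prefix "banana" and the word
# "bandanas":
#
#   RadixNode("banana").match("bandanas")  # -> ("ban", "ana", "danas")
#
# The shared part "ban" becomes an intermediate node, "ana" stays on the old
# child, and "danas" is inserted as a new branch (insert case 4 above).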
def test_trie():
    '''simple docstring'''
    words = '''banana bananas bandana band apple all beast'''.split()
    root = RadixNode()
    root.insert_many(words )

    assert all(root.find(word ) for word in words )
    assert not root.find('''bandanas''' )
    assert not root.find('''apps''' )
    root.delete('''all''' )
    assert not root.find('''all''' )
    root.delete('''banana''' )
    assert not root.find('''banana''' )
    assert root.find('''bananas''' )

    return True


def pytests():
    '''simple docstring'''
    assert test_trie()


def main():
    '''simple docstring'''
    root = RadixNode()
    words = '''banana bananas bandanas bandana band apple all beast'''.split()
    root.insert_many(words )

    print('''Words:''' , words )
    print('''Tree:''' )
    root.print_tree()
if __name__ == "__main__":
main()
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition(number_to_partition: int ) -> set[int]:
    '''simple docstring'''
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime ):
            ret.add(sub * prime )

    return ret
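# Worked example (a sketch, not in the original file): the prime partitions of 7
# are {7}, {5, 2} and {3, 2, 2}; `partition` encodes each multiset by its
# product, so partition(7) == {7, 10, 12} and len(partition(7)) == 3 ways.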
def solution(number_unique_partitions: int = 5000 ) -> int | None:
    '''simple docstring'''
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest( unittest.TestCase ):
    """simple docstring"""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self ):
        '''simple docstring'''
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )

        image_processor_map = {
            '''do_normalize''': False,
            '''do_resize''': True,
            '''image_processor_type''': '''ViTImageProcessor''',
            '''resample''': 3,
            '''size''': {'''height''': 32, '''width''': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file, '''w''', encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map, fp )
    def get_tokenizer( self, **kwargs ):
        '''simple docstring'''
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs )

    def get_image_processor( self, **kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs )

    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1 ) )
        return image_input
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer )

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor, ViTImageProcessor )

    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0 )

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0 )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer )

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor, ViTImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor )

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='''np''' )
        input_processor = processor(images=image_input, return_tensors='''np''' )

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2 )

    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor )

        input_str = '''test'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key] )

    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor )

        input_str = '''test'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input )

        self.assertListEqual(list(inputs.keys() ), ['''pixel_values''', '''labels'''] )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_char_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        decode_strs = [seq.replace(''' ''', '''''' ) for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs )

    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor )

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input )

        self.assertListEqual(list(inputs.keys() ), processor.model_input_names )

    def test_processor_batch_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor )

        char_input = torch.randn(1, 27, 38 )
        bpe_input = torch.randn(1, 27, 50_257 )
        wp_input = torch.randn(1, 27, 30_522 )

        results = processor.batch_decode([char_input, bpe_input, wp_input] )

        self.assertListEqual(list(results.keys() ), ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
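    # Note on the randn shapes above (an inference from the sizes, not stated in
    # the original test): MGP-STR decodes with three heads, and the last dims
    # 38 / 50_257 / 30_522 line up with a character vocabulary, the GPT-2 BPE
    # vocabulary, and the BERT WordPiece vocabulary respectively.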
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset( IterableDataset ):
    """simple docstring"""

    def __init__( self, tokenizer, dataset, n_tasks=None, n_copies=1 ):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__( self ):
        '''simple docstring'''
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
        outputs = self.tokenizer(prompts, padding=True, return_tensors='''pt''' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria( StoppingCriteria ):
    """simple docstring"""

    def __init__( self, start_length, eof_strings, tokenizer ):
        '''simple docstring'''
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self, input_ids, scores, **kwargs ):
        '''simple docstring'''
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block(string ):
    '''simple docstring'''
    string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
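# Example (illustrative): a completion that runs past the generated function is
# truncated at the first stop string, e.g.
#   remove_last_block("def f():\n    return 1\n\nclass Foo:")
# splits on "\nclass" and keeps only "def f():\n    return 1\n".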
def complete_code(accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    '''simple docstring'''
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs['''stopping_criteria'''][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )

    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    '''simple docstring'''
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ['''HF_ALLOW_CODE_EVAL'''] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ['''TOKENIZERS_PARALLELISM'''] = '''false'''

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )

    # Generation settings
    gen_kwargs = {
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset('''openai_humaneval''' )
    code_eval_metric = load_metric('''code_eval''' )

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['''test'''] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''' )
        raise exception

    model, human_eval_loader = accelerator.prepare(model , human_eval_loader )

    generations = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = F"""check({human_eval['test'][task]['entry_point']})"""
            references.append('''\n''' + test_func + '''\n''' + entry_point )

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references , predictions=generations , num_workers=args.num_workers )
        print(F"""Results: {pass_at_k}""" )

        # Save results to json file
        with open(args.output_file , '''w''' ) as fp:
            json.dump(pass_at_k , fp )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
def _modexpt(base: int , exponent: int , modulo_value: int ) -> int:
    '''simple docstring'''
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
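# Quick sanity check (illustrative, not in the original file): _modexpt is plain
# recursive square-and-multiply, e.g.
#   _modexpt(3, 3, 10**2) == 27    # 3**3 = 27
#   _modexpt(7, 10, 10**2) == 49   # 7**10 = 282475249 -> last two digits 49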
def solution(base: int = 1777 , height: int = 1855 , digits: int = 8 ) -> int:
    '''simple docstring'''
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(F"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
        """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PegasusXForConditionalGeneration""",
        """PegasusXModel""",
        """PegasusXPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
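# Behavior note (standard _LazyModule semantics rather than anything specific to
# this file): `from transformers import PegasusXModel` resolves through the
# structure above lazily, so the torch-heavy modeling module is only imported
# when that attribute is first accessed.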
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline( DiffusionPipeline ):
    """simple docstring"""

    def __init__( self, speech_model: WhisperForConditionalGeneration, speech_processor: WhisperProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, ):
        '''simple docstring'''
        super().__init__()

        if safety_checker is None:
            logger.warning(
                F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )

        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )
    def enable_attention_slicing( self, slice_size: Optional[Union[str, int]] = "auto" ):
        '''simple docstring'''
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        '''simple docstring'''
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def __call__( self, audio, sampling_rate=16_000, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        '''simple docstring'''
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='''pt''', sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000 )

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True )[
            0
        ]

        if isinstance(prompt, str ):
            batch_size = 1
        elif isinstance(prompt, list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(callback_steps )}.""" )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1 )
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''''''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    F"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="""
                    F""" {type(prompt )}.""" )
            elif isinstance(negative_prompt, str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    F"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"""
                    F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    ''' the batch size of `prompt`.''' )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding='''max_length''', max_length=max_length, truncation=True, return_tensors='''pt''', )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1 )
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='''cpu''', dtype=latents_dtype ).to(
                    self.device )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            latents = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['''eta'''] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t )

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings ).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
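                # The two chunks come from the duplicated batch built above; scaling
                # the (text - uncond) difference by guidance_scale > 1 extrapolates
                # past the conditional prediction, trading sample diversity for
                # prompt adherence.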
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents )

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents ).sample

        image = (image / 2 + 0.5).clamp(0, 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1 ).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None )
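# Usage sketch (illustrative; the audio loading and pipeline assembly below are
# assumptions, and `pipe` stands for an instantiated SpeechToImagePipeline):
#
#   audio, sr = librosa.load("speech.wav", sr=16_000)
#   output = pipe(audio, sampling_rate=sr)
#   output.images[0].save("out.png")
#
# Whisper transcribes the audio, the transcription becomes the diffusion prompt,
# and the denoising loop above produces the image.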
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )


def acc_and_f1(preds , labels , f1_avg="binary" ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds , labels ):
    '''simple docstring'''
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]

    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='''macro''' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SuperGlue( datasets.Metric ):
    """simple docstring"""

    def _info( self ):
        '''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(self._get_feature_types() ), codebase_urls=[], reference_urls=[], format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None, )
    def _get_feature_types( self ):
        '''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute( self, predictions, references ):
        '''simple docstring'''
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions )}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg='''macro''' )
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
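# Worked multirc example (a sketch): predictions that share one question index,
# e.g. idx {"paragraph": 0, "question": 0} with several answers, are grouped by
# evaluate_multirc above; exact_match requires every answer of a question to be
# right, f1_m averages per-question macro-F1, and f1_a scores all answers in a
# single pool.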
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """simple docstring"""

    destination_vertex: int
    weight: int
class AdjacencyList:
    """simple docstring"""

    def __init__( self, size: int ):
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size

    def __getitem__( self, vertex: int ):
        '''simple docstring'''
        return iter(self._graph[vertex] )

    @property
    def size( self ):
        '''simple docstring'''
        return self._size

    def add_edge( self, from_vertex: int, to_vertex: int, weight: int ):
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError('''Edge weight must be either 0 or 1.''' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('''Vertex indexes must be in [0; size).''' )
        self._graph[from_vertex].append(Edge(to_vertex, weight ) )
    def get_shortest_path( self, start_vertex: int, finish_vertex: int ):
        '''simple docstring'''
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )

        if distances[finish_vertex] is None:
            raise ValueError('''No path from start_vertex to finish_vertex.''' )

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
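# Usage sketch (not part of the original module): zero-one BFS on a tiny graph.
# Weight-0 edges go to the front of the deque and weight-1 edges to the back, so
# vertices are popped in nondecreasing distance order (Dijkstra without a heap).
#
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)
#   g.add_edge(1, 2, 1)
#   assert g.get_shortest_path(0, 2) == 1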