Dataset schema:
  code: string, length 82 to 54.1k
  code_codestyle: int64, range 0 to 699
  style_context: string, length 111 to 35.6k
  style_context_codestyle: int64, range 0 to 699
  label: int64, range 0 to 1
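A minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library; the dataset identifier below is a placeholder, not the actual hub id.

```python
from datasets import load_dataset

# "user/code-style-dataset" is a placeholder id; substitute the real dataset path or hub id.
ds = load_dataset("user/code-style-dataset", split="train")

row = ds[0]
# Integer fields from the schema above.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
# The code and style_context fields are long source strings.
print(row["code"][:200])
```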
code:
from __future__ import annotations

def UpperCamelCase_ ( __a ) -> int:
    a__ : Optional[int] = len(__a ) // 2  # choose the middle 3 elements
    a__ : Union[str, Any] = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )

if __name__ == "__main__":
    import doctest
    doctest.testmod()
code_codestyle: 37

style_context:
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
style_context_codestyle: 37
label: 1

code:
import re

def UpperCamelCase_ ( __a ) -> bool:
    a__ : Optional[Any] = re.compile(
        R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$"
    )
    return bool(re.search(__a , __a ) )

if __name__ == "__main__":
    UpperCamelCase : Tuple = """0094702343221"""
    print(is_sri_lankan_phone_number(phone))
code_codestyle: 37

style_context:
import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase : Union[str, Any] = None def UpperCamelCase_ ( ) -> List[str]: a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Dict = bool(qa["answers"]["text"] ) return qid_to_has_ans def UpperCamelCase_ ( __a ) -> List[Any]: def remove_articles(__a ): return ARTICLES_REGEX.sub(" " , __a ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): a__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) ) def UpperCamelCase_ ( __a ) -> Dict: if not s: return [] return normalize_answer(__a ).split() def UpperCamelCase_ ( __a , __a ) -> str: return int(normalize_answer(__a ) == normalize_answer(__a ) ) def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : int = get_tokens(__a ) a__ : Optional[Any] = get_tokens(__a ) a__ : Any = collections.Counter(__a ) & collections.Counter(__a ) a__ : Dict = sum(common.values() ) if len(__a ) == 0 or len(__a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : Tuple = 1.0 * num_same / len(__a ) a__ : str = 1.0 * num_same / len(__a ) a__ : str = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase_ ( __a , __a ) -> int: a__ : List[str] = {} a__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : List[Any] = qa["id"] a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : Tuple = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue a__ : Tuple = preds[qid] # Take max over all gold answers a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers ) a__ : str = max(compute_fa(__a , __a ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: a__ : Optional[Any] = {} for qid, s in scores.items(): a__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: a__ : Dict = float(not qid_to_has_ans[qid] ) else: a__ : Optional[Any] = s return new_scores def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple: if not 
qid_list: a__ : Union[str, Any] = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: a__ : int = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: for k in new_eval: a__ : Optional[Any] = new_eval[k] def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: plt.step(__a , __a , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__a ) plt.savefig(__a ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict: a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) a__ : Any = 0.0 a__ : Optional[int] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Tuple = [0.0] a__ : List[str] = 0.0 for i, qid in enumerate(__a ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Any = true_pos / float(i + 1 ) a__ : int = true_pos / float(__a ) if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__a ) recalls.append(__a ) if out_image: plot_pr_curve(__a , __a , __a , __a ) return {"ap": 100.0 * avg_prec} def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str: if out_image_dir and not os.path.exists(__a ): os.makedirs(__a ) a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)" , ) merge_eval(__a , __a , "pr_exact" ) merge_eval(__a , __a , "pr_f1" ) merge_eval(__a , __a , "pr_oracle" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: if not qid_list: return a__ : Optional[Any] = [na_probs[k] for k in qid_list] a__ : str = np.ones_like(__a ) / float(len(__a ) ) plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]: a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : Optional[Any] = num_no_ans a__ : Dict = cur_score a__ : Any = 0.0 a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) for i, qid in enumerate(__a ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : Optional[int] = scores[qid] else: if preds[qid]: a__ : str = -1 else: a__ : Union[str, Any] = 0 cur_score += diff if cur_score > best_score: a__ : Any = cur_score a__ : Dict = na_probs[qid] return 100.0 * best_score / len(__a ), best_thresh def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any: a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__ : Any = best_exact a__ : Any = exact_thresh a__ : List[Any] = best_fa a__ : Optional[int] = fa_thresh def UpperCamelCase_ ( ) -> Tuple: with open(OPTS.data_file ) as f: a__ : List[Any] = json.load(__a ) a__ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: a__ : int = json.load(__a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : List[str] = json.load(__a ) else: a__ : Optional[int] = {k: 0.0 for k in preds} a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v] a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] a__, a__ : str = get_raw_scores(__a , __a ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : Tuple = make_eval_dict(__a , __a ) if has_ans_qids: a__ : str = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "HasAns" ) if no_ans_qids: a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__a , __a , __a , __a , __a , __a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__a , __a ) else: print(json.dumps(__a , indent=2 ) ) if __name__ == "__main__": UpperCamelCase : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
style_context_codestyle: 37
label: 1

code:
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel

class A__ ( A__ ):
    """simple docstring"""
    _lowercase = 'M-CLIP'

    def __init__( self : Any , lowerCamelCase__ : Optional[Any]=1_024 , lowerCamelCase__ : Union[str, Any]=768 , **lowerCamelCase__ : Optional[Any] ):
        a__ : Any = transformerDimSize
        a__ : Any = imageDimSize
        super().__init__(**lowerCamelCase__ )

class A__ ( A__ ):
    """simple docstring"""
    _lowercase = MCLIPConfig

    def __init__( self : Union[str, Any] , lowerCamelCase__ : Dict , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[str] ):
        super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
        a__ : Optional[Any] = XLMRobertaModel(lowerCamelCase__ )
        a__ : Any = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims )

    def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ):
        a__ : str = self.transformer(input_ids=lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
        a__ : str = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(lowerCamelCase__ ), embs
code_codestyle: 37

style_context:
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CLIPTokenizer _lowercase = CLIPTokenizerFast _lowercase = True _lowercase = {} _lowercase = False def _UpperCamelCase( self : List[Any] ): super().setUp() # fmt: off a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a__ : Optional[Any] = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : int = "lower newer" a__ : Optional[int] = "lower newer" return input_text, output_text def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = "lower newer" a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = tokens + [tokenizer.unk_token] a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) @require_ftfy def _UpperCamelCase( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y" a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of space type a__ : str = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of line break type a__ : Union[str, Any] = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}''' a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) a__ : Optional[Any] = f''' {text}''' a__ : str = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) def _UpperCamelCase( self : int ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(lowerCamelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def _UpperCamelCase( self : int ): super().test_tokenization_python_rust_equals() def _UpperCamelCase( self : str ): # CLIP always lower cases letters pass
style_context_codestyle: 37
label: 1

code:
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ): super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ ) a__ : str = Sql( cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = None a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , ) # Build dataset for splits a__ : List[str] = self.builder.as_dataset( split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) a__ : Any = dataset a__ : str = name a__ : Tuple = con a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a__ : Any = num_proc a__ : Tuple = to_sql_kwargs def _UpperCamelCase( self : List[Any] ): a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ ) a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs ) return written def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): a__, a__, a__ : Union[str, Any] = args a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs a__ : Tuple = query_table( table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) a__ : str = batch.to_pandas() a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ ) return num_rows or len(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a__, a__ : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, 
index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
code_codestyle: 37

style_context:
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase : List[Any] = """======= >>>>>>> """ UpperCamelCase : Optional[Any] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase : Any = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def UpperCamelCase_ ( __a ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class A__ ( A__ ): """simple docstring""" @staticmethod def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ): a__ : List[str] = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ): a__ : str = get_logger("datasets-cli/converting" ) a__ : Optional[Any] = tfds_path a__ : Optional[int] = datasets_directory def _UpperCamelCase( self : int ): if os.path.isdir(self._tfds_path ): a__ : List[str] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): a__ : Any = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) a__ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) a__ : Tuple = [] a__ : str = [] a__ : List[Any] = {} if os.path.isdir(self._tfds_path ): a__ : List[str] = os.listdir(lowerCamelCase__ ) else: a__ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(lowerCamelCase__ , encoding="utf-8" ) as f: a__ : List[Any] = f.readlines() a__ : Union[str, Any] = [] a__ : Union[str, Any] = False a__ : Union[str, Any] = False a__ : Dict = [] for line in lines: a__ : Optional[Any] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: a__ : List[Any] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here a__ : List[str] = "" continue elif "from absl import logging" in out_line: a__ : Dict = "from datasets import logging\n" elif "getLogger" in out_line: a__ : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): a__ : List[str] = True a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) a__ : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: a__ : Optional[int] = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset a__ : Dict = f_name.replace(".py" , "" ) a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: a__ : Any = os.path.basename(lowerCamelCase__ ) a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ , lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
style_context_codestyle: 37
label: 1

code:
from math import asin, atan, cos, radians, sin, sqrt, tan

UpperCamelCase : List[Any] = 637_8137.0
UpperCamelCase : Tuple = 635_6752.31_4245
UpperCamelCase : Optional[Any] = 637_8137

def UpperCamelCase_ ( __a , __a , __a , __a ) -> float:
    a__ : Optional[int] = (AXIS_A - AXIS_B) / AXIS_A
    a__ : List[str] = atan((1 - flattening) * tan(radians(__a ) ) )
    a__ : List[Any] = atan((1 - flattening) * tan(radians(__a ) ) )
    a__ : str = radians(__a )
    a__ : int = radians(__a )
    # Equation
    a__ : Optional[int] = sin((phi_a - phi_a) / 2 )
    a__ : Tuple = sin((lambda_a - lambda_a) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    a__ : Optional[Any] = sqrt(sin_sq_phi + (cos(__a ) * cos(__a ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(__a )

if __name__ == "__main__":
    import doctest
    doctest.testmod()
code_codestyle: 37

style_context:
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Any = self.file.__enter__ class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = file_ def __enter__( self : str ): self._file.__enter__() return self def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ): self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ ) def __iter__( self : List[str] ): return iter(self._file ) def _UpperCamelCase( self : Any ): return next(self._file ) def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ): return getattr(self._file , lowerCamelCase__ ) def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ): return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) ) a__ : Any = fixed_enter
style_context_codestyle: 37
label: 1

code:
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata def UpperCamelCase_ ( __a , __a=False ) -> Union[str, Any]: try: a__ : Optional[int] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. a__ : Dict = default else: # KEY is set, convert it to True or False. try: a__ : Tuple = strtobool(__a ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''' ) return _value UpperCamelCase : Optional[int] = parse_flag_from_env("""RUN_SLOW""", default=False) UpperCamelCase : Any = parse_flag_from_env("""RUN_REMOTE""", default=False) UpperCamelCase : Any = parse_flag_from_env("""RUN_LOCAL""", default=True) UpperCamelCase : Optional[int] = parse_flag_from_env("""RUN_PACKAGED""", default=True) # Compression UpperCamelCase : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""") UpperCamelCase : Dict = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""") UpperCamelCase : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""") # Audio UpperCamelCase : Optional[Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""), reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """, ) # Beam UpperCamelCase : Dict = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""), reason="""test requires apache-beam and a compatible dill version""", ) # Dill-cloudpickle compatibility UpperCamelCase : Dict = pytest.mark.skipif( config.DILL_VERSION <= version.parse("""0.3.2"""), reason="""test requires dill>0.3.2 for cloudpickle compatibility""", ) # Windows UpperCamelCase : List[str] = pytest.mark.skipif( sys.platform == """win32""", reason="""test should not be run on Windows""", ) def UpperCamelCase_ ( __a ) -> Optional[int]: try: import faiss # noqa except ImportError: a__ : int = unittest.skip("test requires faiss" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> Union[str, Any]: try: import regex # noqa except ImportError: a__ : int = unittest.skip("test requires regex" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> int: try: import elasticsearch # noqa except ImportError: a__ : Optional[int] = unittest.skip("test requires elasticsearch" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> int: try: import sqlalchemy # noqa except ImportError: a__ : Optional[Any] = unittest.skip("test requires sqlalchemy" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> str: if not config.TORCH_AVAILABLE: a__ : List[Any] = unittest.skip("test requires PyTorch" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> Any: if not config.TF_AVAILABLE: a__ : str = unittest.skip("test requires TensorFlow" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> Optional[Any]: if not config.JAX_AVAILABLE: a__ : List[Any] = 
unittest.skip("test requires JAX" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> Optional[int]: if not config.PIL_AVAILABLE: a__ : int = unittest.skip("test requires Pillow" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> str: try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers" )(__a ) else: return test_case def UpperCamelCase_ ( __a ) -> Optional[int]: try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken" )(__a ) else: return test_case def UpperCamelCase_ ( __a ) -> Optional[Any]: try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy" )(__a ) else: return test_case def UpperCamelCase_ ( __a ) -> Tuple: def _require_spacy_model(__a ): try: import spacy # noqa F401 spacy.load(__a ) except ImportError: return unittest.skip("test requires spacy" )(__a ) except OSError: return unittest.skip("test requires spacy model '{}'".format(__a ) )(__a ) else: return test_case return _require_spacy_model def UpperCamelCase_ ( __a ) -> Dict: try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark" )(__a ) else: return test_case def UpperCamelCase_ ( __a ) -> Union[str, Any]: try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark" )(__a ) else: return test_case def UpperCamelCase_ ( __a ) -> int: if not _run_slow_tests or _run_slow_tests == 0: a__ : Tuple = unittest.skip("test is slow" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> Dict: if not _run_local_tests or _run_local_tests == 0: a__ : List[Any] = unittest.skip("test is local" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> Tuple: if not _run_packaged_tests or _run_packaged_tests == 0: a__ : List[str] = unittest.skip("test is packaged" )(__a ) return test_case def UpperCamelCase_ ( __a ) -> int: if not _run_remote_tests or _run_remote_tests == 0: a__ : List[Any] = unittest.skip("test requires remote" )(__a ) return test_case def UpperCamelCase_ ( *__a ) -> Union[str, Any]: def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(__a ) and name.startswith("test" ): for decorator in decorators: a__ : str = decorator(__a ) setattr(cls , __a , __a ) return cls return decorate class A__ ( A__ ): """simple docstring""" pass class A__ ( A__ ): """simple docstring""" _lowercase = 0 _lowercase = 1 _lowercase = 2 @contextmanager def UpperCamelCase_ ( __a=OfflineSimulationMode.CONNECTION_FAILS , __a=1e-16 ) -> Dict: a__ : int = requests.Session().request def timeout_request(__a , __a , __a , **__a ): # Change the url to an invalid url so that the connection hangs a__ : Optional[Any] = "https://10.255.255.1" if kwargs.get("timeout" ) is None: raise RequestWouldHangIndefinitelyError( f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' ) a__ : Optional[int] = timeout try: return online_request(__a , __a , **__a ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier a__ : Dict = url a__ : Optional[int] = e.args[0] a__ : Union[str, Any] = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]''' ),) a__ : Dict = (max_retry_error,) raise def raise_connection_error(__a , __a , **__a ): raise requests.ConnectionError("Offline mode is enabled." 
, request=__a ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send" , __a ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request" , __a ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE" , __a ): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum." ) @contextmanager def UpperCamelCase_ ( *__a , **__a ) -> int: a__ : int = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__a , **__a ) as tmp_dir: try: os.chdir(__a ) yield finally: os.chdir(__a ) @contextmanager def UpperCamelCase_ ( ) -> Optional[int]: import gc gc.collect() a__ : int = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def UpperCamelCase_ ( ) -> Union[str, Any]: import gc gc.collect() a__ : Union[str, Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def UpperCamelCase_ ( __a , __a ) -> Union[str, Any]: return deepcopy(__a ).integers(0 , 100 , 10 ).tolist() == deepcopy(__a ).integers(0 , 100 , 10 ).tolist() def UpperCamelCase_ ( __a ) -> Tuple: import decorator from requests.exceptions import HTTPError def _wrapper(__a , *__a , **__a ): try: return func(*__a , **__a ) except HTTPError as err: if str(__a ).startswith("500" ) or str(__a ).startswith("502" ): pytest.xfail(str(__a ) ) raise err return decorator.decorator(_wrapper , __a ) class A__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] ): a__ : str = returncode a__ : Optional[Any] = stdout a__ : Any = stderr async def UpperCamelCase_ ( __a , __a ) -> Tuple: while True: a__ : int = await stream.readline() if line: callback(__a ) else: break async def UpperCamelCase_ ( __a , __a=None , __a=None , __a=None , __a=False , __a=False ) -> _RunOutput: if echo: print("\nRunning: " , " ".join(__a ) ) a__ : Optional[Any] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__a , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__a , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) a__ : int = [] a__ : List[Any] = [] def tee(__a , __a , __a , __a="" ): a__ : int = line.decode("utf-8" ).rstrip() sink.append(__a ) if not quiet: print(__a , __a , file=__a ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __a : tee(__a , __a , sys.stdout , label="stdout:" ) ), _read_stream(p.stderr , lambda __a : tee(__a , __a , sys.stderr , label="stderr:" ) ), ] , timeout=__a , ) return _RunOutput(await p.wait() , __a , __a ) def UpperCamelCase_ ( __a , __a=None , __a=None , __a=180 , __a=False , __a=True ) -> _RunOutput: a__ : Union[str, Any] = asyncio.get_event_loop() a__ : str = loop.run_until_complete( _stream_subprocess(__a , env=__a , stdin=__a , timeout=__a , quiet=__a , echo=__a ) ) a__ : str = " ".join(__a ) if result.returncode > 0: a__ : List[str] = "\n".join(result.stderr ) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' ) return result def UpperCamelCase_ ( ) -> Optional[int]: a__ : Dict = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" ) a__ : Optional[int] = re.sub(R"^gw" , "" , __a , 0 , re.M ) return int(__a ) def UpperCamelCase_ ( ) -> str: a__ : int = 29_500 a__ : Tuple = pytest_xdist_worker_id() return port + uniq_delta
code_codestyle: 37

style_context:
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") a__ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__a ): os.makedirs(__a ) a__ : Any = model.state_dict() def to_tf_var_name(__a ): for patt, repl in iter(__a ): a__ : Tuple = name.replace(__a , __a ) return f'''bert/{name}''' def create_tf_var(__a , __a , __a ): a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype ) a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: a__ : int = to_tf_var_name(__a ) a__ : Union[str, Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): a__ : int = torch_tensor.T a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a ) tf.keras.backend.set_value(__a , __a ) a__ : int = session.run(__a ) print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' ) a__ : Any = tf.train.Saver(tf.trainable_variables() ) saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) ) def UpperCamelCase_ ( __a=None ) -> int: a__ : Dict = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" ) a__ : Optional[Any] = parser.parse_args(__a ) a__ : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
style_context_codestyle: 37
label: 1

code:
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def UpperCamelCase_ ( __a , __a=False ) -> Dict: try: a__ : str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. a__ : Tuple = default else: # KEY is set, convert it to True or False. try: a__ : int = strtobool(__a ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''' ) return _value UpperCamelCase : int = parse_flag_from_env("""RUN_SLOW""", default=False) def UpperCamelCase_ ( __a ) -> Dict: return unittest.skip("Test was skipped" )(__a ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: return unittest.skipUnless(_run_slow_tests , "test is slow" )(__a ) def UpperCamelCase_ ( __a ) -> int: return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(__a ) def UpperCamelCase_ ( __a ) -> Optional[int]: return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(__a ) def UpperCamelCase_ ( __a ) -> Tuple: return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(__a ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(__a ) def UpperCamelCase_ ( __a ) -> Dict: return unittest.skipUnless( is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(__a ) def UpperCamelCase_ ( __a ) -> int: return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(__a ) def UpperCamelCase_ ( __a ) -> Any: return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(__a ) def UpperCamelCase_ ( __a ) -> Any: return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(__a ) def UpperCamelCase_ ( __a ) -> Tuple: return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(__a ) def UpperCamelCase_ ( __a ) -> Dict: return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(__a ) def UpperCamelCase_ ( __a ) -> List[Any]: return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(__a ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(__a ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(__a ) def UpperCamelCase_ ( __a ) -> Optional[int]: return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(__a ) def UpperCamelCase_ ( __a=None , __a=None ) -> List[Any]: if test_case is None: return partial(__a , version=__a ) return unittest.skipUnless(is_torch_version(">=" , __a ) , f'''test requires torch version >= {version}''' )(__a ) def UpperCamelCase_ ( __a ) -> str: return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(__a ) def 
UpperCamelCase_ ( __a ) -> Optional[Any]: return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(__a ) def UpperCamelCase_ ( __a ) -> Any: return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(__a ) UpperCamelCase : int = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def UpperCamelCase_ ( __a ) -> List[str]: return unittest.skipUnless( _atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(__a ) class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = True @classmethod def _UpperCamelCase( cls : Optional[int] ): a__ : Any = tempfile.mkdtemp() @classmethod def _UpperCamelCase( cls : List[str] ): if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def _UpperCamelCase( self : Union[str, Any] ): if self.clear_on_setup: for path in Path(self.tmpdir ).glob("**/*" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(lowerCamelCase__ ) class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Optional[int] ): super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Union[mock.Mock, List[mock.Mock]] ): a__ : List[Any] = mocks if isinstance(lowerCamelCase__ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def UpperCamelCase_ ( __a ) -> Any: a__ : str = AcceleratorState() a__ : Dict = tensor[None].clone().to(state.device ) a__ : Tuple = gather(__a ).cpu() a__ : List[Any] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , __a ): return False return True class A__ : """simple docstring""" def __init__( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ): a__ : Tuple = returncode a__ : Optional[int] = stdout a__ : Tuple = stderr async def UpperCamelCase_ ( __a , __a ) -> int: while True: a__ : int = await stream.readline() if line: callback(__a ) else: break async def UpperCamelCase_ ( __a , __a=None , __a=None , __a=None , __a=False , __a=False ) -> _RunOutput: if echo: print("\nRunning: " , " ".join(__a ) ) a__ : str = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__a , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__a , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) a__ : Optional[Any] = [] a__ : Tuple = [] def tee(__a , __a , __a , __a="" ): a__ : str = line.decode("utf-8" ).rstrip() sink.append(__a ) if not quiet: print(__a , __a , file=__a ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda __a : tee(__a , __a , sys.stdout , label="stdout:" ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda __a : tee(__a , __a , sys.stderr , label="stderr:" ) ) ), ] , timeout=__a , ) return _RunOutput(await p.wait() , __a , __a ) def UpperCamelCase_ ( __a , __a=None , __a=None , __a=180 , __a=False , __a=True ) -> _RunOutput: a__ : str = asyncio.get_event_loop() a__ : List[str] = loop.run_until_complete( _stream_subprocess(__a , env=__a , stdin=__a , timeout=__a , quiet=__a , echo=__a ) ) a__ : List[Any] = " ".join(__a ) if result.returncode > 0: a__ : Union[str, Any] = "\n".join(result.stderr ) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''' ) return result class A__ ( A__ ): """simple docstring""" pass def UpperCamelCase_ ( __a , __a=False ) -> int: try: a__ : List[Any] = subprocess.check_output(__a , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__a , "decode" ): a__ : Optional[int] = output.decode("utf-8" ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'''Command `{' '.join(__a )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
37
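The helper at the top of the row above turns an environment variable into a boolean test flag: a missing variable falls back to the default, and anything else must parse as a yes/no style value. A self-contained sketch of that pattern (the RUN_SLOW name mirrors the row; distutils.util.strtobool is deprecated on newer Pythons and is used here only to match the row):

import os
from distutils.util import strtobool  # deprecated in Python 3.10+, mirrors the import in the row above

def parse_flag_from_env(key: str, default: bool = False) -> bool:
    value = os.environ.get(key)
    if value is None:
        # Variable not set: fall back to the default.
        return default
    try:
        return bool(strtobool(value))
    except ValueError as exc:
        raise ValueError(f"If set, {key} must be yes or no.") from exc

print(parse_flag_from_env("RUN_SLOW", default=False))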
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Dict = patch_size a__ : List[Any] = max_length a__ : str = num_mel_bins a__ : Optional[Any] = is_training a__ : Optional[int] = use_labels a__ : List[Any] = hidden_size a__ : str = num_hidden_layers a__ : Any = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Tuple = attention_probs_dropout_prob a__ : List[Any] = type_sequence_label_size a__ : Any = initializer_range a__ : str = scope a__ : List[str] = frequency_stride a__ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 a__ : Tuple = frequency_out_dimension * time_out_dimension a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : List[str] ): a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a__ : List[Any] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = self.get_config() return config, input_values, labels def _UpperCamelCase( self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , 
lowerCamelCase__ : Optional[int] ): a__ : List[Any] = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : str ): a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ) : Optional[int] = config_and_inputs a__ : List[Any] = {"input_values": input_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowercase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _UpperCamelCase( self : str ): a__ : str = ASTModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : Optional[int] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : Tuple ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Optional[Any] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : Optional[int] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a__, a__ : List[str] = torchaudio.load(__a ) return audio, sampling_rate @require_torch @require_torchaudio class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : List[str] ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _UpperCamelCase( self : Optional[int] ): a__ : int = self.default_feature_extractor a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" 
).to(lowerCamelCase__ ) a__ : Any = self.default_feature_extractor a__, a__ : Dict = prepare_audio() a__ : str = audio.squeeze().numpy() a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
37
1
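The tester in the row above derives its expected sequence length from how many patches fit into the (max_length x num_mel_bins) spectrogram, plus the [CLS] and distillation tokens. Worked through with the tester's own defaults (patch_size=2, max_length=24, num_mel_bins=16, both strides 2):

patch_size, max_length, num_mel_bins = 2, 24, 16
frequency_stride = time_stride = 2

frequency_out_dimension = (num_mel_bins - patch_size) // frequency_stride + 1  # 8
time_out_dimension = (max_length - patch_size) // time_stride + 1              # 12
num_patches = frequency_out_dimension * time_out_dimension                     # 96
seq_length = num_patches + 2                                                   # 98, adding [CLS] + distillation token
print(frequency_out_dimension, time_out_dimension, seq_length)  # 8 12 98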
import math import qiskit def UpperCamelCase_ ( __a = 1 , __a = 1 , __a = 1 ) -> qiskit.result.counts.Counts: if ( isinstance(__a , __a ) or isinstance(__a , __a ) or isinstance(__a , __a ) ): raise TypeError("inputs must be integers." ) if (input_a < 0) or (input_a < 0) or (carry_in < 0): raise ValueError("inputs must be positive." ) if ( (math.floor(__a ) != input_a) or (math.floor(__a ) != input_a) or (math.floor(__a ) != carry_in) ): raise ValueError("inputs must be exact integers." ) if (input_a > 2) or (input_a > 2) or (carry_in > 2): raise ValueError("inputs must be less or equal to 2." ) # build registers a__ : Union[str, Any] = qiskit.QuantumRegister(4 , "qr" ) a__ : Optional[Any] = qiskit.ClassicalRegister(2 , "cr" ) # list the entries a__ : int = [input_a, input_a, carry_in] a__ : str = qiskit.QuantumCircuit(__a , __a ) for i in range(0 , 3 ): if entry[i] == 2: quantum_circuit.h(__a ) # for hadamard entries elif entry[i] == 1: quantum_circuit.x(__a ) # for 1 entries elif entry[i] == 0: quantum_circuit.i(__a ) # for 0 entries # build the circuit quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate quantum_circuit.cx(0 , 1 ) quantum_circuit.ccx(1 , 2 , 3 ) quantum_circuit.cx(1 , 2 ) quantum_circuit.cx(0 , 1 ) quantum_circuit.measure([2, 3] , __a ) # measure the last two qbits a__ : Any = qiskit.Aer.get_backend("aer_simulator" ) a__ : List[Any] = qiskit.execute(__a , __a , shots=1_000 ) return job.result().get_counts(__a ) if __name__ == "__main__": print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
37
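For definite 0/1 inputs (no Hadamard superposition), the circuit in the row above should reproduce the classical full adder, so its measured counts can be sanity-checked without qiskit. A purely classical reference implementation, given as an illustrative sketch:

def classical_full_adder(a: int, b: int, carry_in: int) -> tuple[int, int]:
    # Returns (sum_bit, carry_out) for single-bit inputs.
    total = a + b + carry_in
    return total % 2, total // 2

for bits in ((0, 0, 0), (1, 0, 0), (1, 1, 0), (1, 1, 1)):
    print(bits, "->", classical_full_adder(*bits))
# (1, 1, 1) -> (1, 1): both the sum bit and the carry-out are 1.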
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
37
1
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Any ): a__ : str = params a__ : Any = np.array(lowerCamelCase__ ) a__ : Dict = np.array([len(lowerCamelCase__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): return (self.token_ids[index], self.lengths[index]) def __len__( self : Any ): return len(self.lengths ) def _UpperCamelCase( self : Dict ): assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def _UpperCamelCase( self : int ): a__ : List[Any] = self.params.max_model_input_size a__ : str = self.lengths > max_len logger.info(f'''Splitting {sum(lowerCamelCase__ )} too long sequences.''' ) def divide_chunks(lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ): return [l[i : i + n] for i in range(0 , len(lowerCamelCase__ ) , lowerCamelCase__ )] a__ : Union[str, Any] = [] a__ : str = [] if self.params.mlm: a__, a__ : List[str] = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: a__, a__ : Dict = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: a__ : Tuple = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: a__ : Optional[Any] = np.insert(lowerCamelCase__ , 0 , lowerCamelCase__ ) if sub_s[-1] != sep_id: a__ : int = np.insert(lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ ) assert len(lowerCamelCase__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(lowerCamelCase__ ) new_tok_ids.extend(lowerCamelCase__ ) new_lengths.extend([len(lowerCamelCase__ ) for l in sub_seqs] ) a__ : Dict = np.array(lowerCamelCase__ ) a__ : Union[str, Any] = np.array(lowerCamelCase__ ) def _UpperCamelCase( self : str ): a__ : Tuple = len(self ) a__ : str = self.lengths > 11 a__ : Union[str, Any] = self.token_ids[indices] a__ : List[Any] = self.lengths[indices] a__ : Tuple = len(self ) logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' ) def _UpperCamelCase( self : str ): if "unk_token" not in self.params.special_tok_ids: return else: a__ : Tuple = self.params.special_tok_ids["unk_token"] a__ : Optional[int] = len(self ) a__ : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) a__ : Any = (unk_occs / self.lengths) < 0.5 a__ : str = self.token_ids[indices] a__ : Optional[int] = self.lengths[indices] a__ : List[Any] = len(self ) logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' ) def _UpperCamelCase( self : Dict ): if not self.params.is_master: return logger.info(f'''{len(self )} sequences''' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering 
{100*nb_unknown/data_len:.2f}% of the data)') def _UpperCamelCase( self : Dict , lowerCamelCase__ : Tuple ): a__ : str = [t[0] for t in batch] a__ : Any = [t[1] for t in batch] assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) # Max for paddings a__ : Dict = max(lowerCamelCase__ ) # Pad token ids if self.params.mlm: a__ : Optional[int] = self.params.special_tok_ids["pad_token"] else: a__ : int = self.params.special_tok_ids["unk_token"] a__ : str = [list(t.astype(lowerCamelCase__ ) ) + [pad_idx] * (max_seq_len_ - len(lowerCamelCase__ )) for t in token_ids] assert len(tk_ ) == len(lowerCamelCase__ ) assert all(len(lowerCamelCase__ ) == max_seq_len_ for t in tk_ ) a__ : Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_) a__ : Optional[int] = torch.tensor(lowerCamelCase__ ) # (bs) return tk_t, lg_t
37
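The final method in the row above pads every token-id sequence in a batch to the longest length present before stacking them into tensors. A stand-alone sketch of that collate logic (the pad id 0 is a placeholder, not the dataset's real special-token id):

import torch

def pad_batch(sequences, pad_idx=0):
    # Pad variable-length id lists to the batch maximum; return (bs, max_len) ids and (bs,) lengths.
    lengths = [len(seq) for seq in sequences]
    max_seq_len = max(lengths)
    padded = [list(seq) + [pad_idx] * (max_seq_len - len(seq)) for seq in sequences]
    return torch.tensor(padded), torch.tensor(lengths)

ids, lengths = pad_batch([[5, 6, 7, 8], [9, 10], [11]])
print(ids.shape, lengths.tolist())  # torch.Size([3, 4]) [4, 2, 1]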
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
1
import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : List[Any] ): a__ : Union[str, Any] = inspect.getfile(accelerate.test_utils ) a__ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) a__ : List[Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) a__ : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def _UpperCamelCase( self : Optional[int] ): print(f'''Found {torch.cuda.device_count()} devices.''' ) a__ : int = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() ) @require_multi_gpu def _UpperCamelCase( self : Optional[int] ): print(f'''Found {torch.cuda.device_count()} devices.''' ) a__ : Tuple = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(f'''Command: {cmd}''' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() ) @require_multi_gpu def _UpperCamelCase( self : Tuple ): a__ : List[str] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() ) @require_multi_gpu def _UpperCamelCase( self : Any ): print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' ) a__ : Optional[int] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() ) if __name__ == "__main__": UpperCamelCase : str = Accelerator() UpperCamelCase : int = (accelerator.state.process_index + 2, 10) UpperCamelCase : Optional[Any] = torch.randint(0, 10, shape).to(accelerator.device) UpperCamelCase : Optional[int] = """""" UpperCamelCase : Union[str, Any] = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." UpperCamelCase : int = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." UpperCamelCase : Tuple = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
37
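The assertions at the bottom of the row above spell out the contract being tested: each rank's tensor is padded with zeros along dim 0 up to the largest size seen on any process, and pad_first moves the zero padding to the front. A single-process sketch of that padding rule, useful only for reasoning about the expected shapes:

import torch

def pad_dim0(tensor, target_size, pad_first=False, pad_value=0):
    # Grow dim 0 to target_size by concatenating a constant block before or after the data.
    pad_rows = target_size - tensor.shape[0]
    pad = torch.full((pad_rows, *tensor.shape[1:]), pad_value, dtype=tensor.dtype)
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)

t = torch.ones(3, 10, dtype=torch.int64)
print(pad_dim0(t, 5).shape)                      # torch.Size([5, 10])
print(pad_dim0(t, 5, pad_first=True)[:2].sum())  # tensor(0): the leading rows are padding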
def binomial_coefficient(n, r):
    # Compute C(n, r) using a single row of Pascal's triangle of size r + 1.
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # compute the current row from the previous row, updating from the right
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
37
1
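The row above fills a single array of size r + 1 row by row; sweeping j from right to left is what lets one array stand in for two rows of Pascal's triangle, since each C(i-1, j-1) is read before it is overwritten. The result is easy to cross-check against the standard library:

import math

print(math.comb(10, 5))  # 252, the same value the row-by-row update computes for C(10, 5)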
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : List[str] = logging.get_logger(__name__) def UpperCamelCase_ ( __a , __a=False ) -> str: a__ : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" a__ : Any = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def UpperCamelCase_ ( __a , __a , __a=False ) -> List[str]: for i in range(config.num_hidden_layers ): if base_model: a__ : Union[str, Any] = "" else: a__ : str = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a__ : Tuple = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) a__ : Any = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict a__ : List[str] = in_proj_weight[ : config.hidden_size, : ] a__ : List[Any] = in_proj_bias[: config.hidden_size] a__ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a__ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a__ : str = in_proj_weight[ -config.hidden_size :, : ] a__ : Union[str, Any] = in_proj_bias[-config.hidden_size :] def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Union[str, Any] = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__a , __a ) def UpperCamelCase_ ( __a , __a , __a ) -> Union[str, Any]: a__ : Optional[int] = dct.pop(__a ) a__ : str = val def UpperCamelCase_ ( ) -> Dict: a__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg" a__ : Dict = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def UpperCamelCase_ ( __a , __a , __a=True ) -> str: a__ : Tuple = ViTConfig() # patch_size if model_name[-1] == "8": a__ : Any = 8 # set labels if required if not base_model: a__ : str = 1_000 a__ : Tuple = "huggingface/label-files" a__ : int = "imagenet-1k-id2label.json" a__ : Dict = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) a__ : List[Any] = {int(__a ): v for k, v in idalabel.items()} a__ : Tuple = idalabel a__ : Any = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: a__ : Dict = 384 a__ : Tuple = 1_536 a__ : str = 12 a__ : Union[str, Any] = 6 # load original model from torch hub a__ : List[Any] = torch.hub.load("facebookresearch/dino:main" , __a ) original_model.eval() # load state_dict of original model, remove and rename some keys a__ : Dict = original_model.state_dict() if base_model: remove_classification_head_(__a ) a__ : Any = create_rename_keys(__a , base_model=__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) read_in_q_k_v(__a , __a , __a ) # load HuggingFace model if base_model: a__ : List[str] = ViTModel(__a , add_pooling_layer=__a ).eval() else: a__ : List[str] = ViTForImageClassification(__a ).eval() model.load_state_dict(__a ) # Check outputs on an image, prepared by ViTImageProcessor a__ : Optional[int] = ViTImageProcessor() a__ : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) a__ : List[str] = encoding["pixel_values"] a__ : List[Any] = model(__a ) if base_model: a__ : List[Any] = original_model(__a ) assert torch.allclose(__a , outputs.last_hidden_state[:, 0, :] , atol=1e-1 ) else: a__ : List[str] = original_model(__a ) assert logits.shape == outputs.logits.shape assert torch.allclose(__a , outputs.logits , atol=1e-3 ) Path(__a ).mkdir(exist_ok=__a ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__a ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__a ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""dino_vitb16""", type=str, help="""Name of the model trained with DINO you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) 
parser.add_argument( """--base_model""", action="""store_true""", help="""Whether to only convert the base model (no projection head weights).""", ) parser.set_defaults(base_model=True) UpperCamelCase : int = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
37
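The read_in_q_k_v step in the row above splits timm's fused qkv projection, a single (3 * hidden_size, hidden_size) matrix plus its bias, into separate query/key/value slices along the first dimension. A toy illustration of that slicing (hidden_size=4 is a made-up value):

import torch

hidden_size = 4  # toy value for illustration
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32)
in_proj_weight = in_proj_weight.reshape(3 * hidden_size, hidden_size)

query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]
print(query_w.shape, key_w.shape, value_w.shape)  # three (4, 4) blocks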
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
1
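The _pad override at the end of the row above pads global_attention_mask with -1 rather than 0, since 0 already means 'local attention' for LED; -1 marks positions that exist only because of padding. A stand-alone sketch of that rule on plain lists:

def pad_global_attention_mask(mask, target_len, padding_side="right"):
    # 1 = global attention, 0 = local attention, -1 = padding position (no real token).
    difference = target_len - len(mask)
    if difference <= 0:
        return list(mask)
    if padding_side == "right":
        return list(mask) + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + list(mask)
    raise ValueError("Invalid padding strategy:" + str(padding_side))

print(pad_global_attention_mask([1, 0, 0], 6))  # [1, 0, 0, -1, -1, -1]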
import pickle import numpy as np from matplotlib import pyplot as plt class A__ : """simple docstring""" def __init__( self : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : int=0.2 , lowerCamelCase__ : Dict=0.2 ): a__ : Optional[int] = bp_numa a__ : Tuple = bp_numa a__ : Optional[Any] = bp_numa a__ : str = conva_get[:2] a__ : Optional[int] = conva_get[2] a__ : Any = size_pa a__ : List[str] = rate_w a__ : str = rate_t a__ : Optional[int] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] a__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) a__ : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) a__ : List[str] = -2 * np.random.rand(self.conva[1] ) + 1 a__ : List[str] = -2 * np.random.rand(self.num_bpa ) + 1 a__ : List[Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Optional[Any] ): # save model dict with pickle a__ : int = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(lowerCamelCase__ , "wb" ) as f: pickle.dump(lowerCamelCase__ , lowerCamelCase__ ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCamelCase( cls : Tuple , lowerCamelCase__ : Tuple ): # read saved model with open(lowerCamelCase__ , "rb" ) as f: a__ : Tuple = pickle.load(lowerCamelCase__ ) # noqa: S301 a__ : List[str] = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) a__ : Optional[Any] = model_dic.get("size_pooling1" ) a__ : Optional[Any] = model_dic.get("num_bp1" ) a__ : Tuple = model_dic.get("num_bp2" ) a__ : int = model_dic.get("num_bp3" ) a__ : Tuple = model_dic.get("rate_weight" ) a__ : Optional[Any] = model_dic.get("rate_thre" ) # create model instance a__ : Tuple = CNN(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # modify model parameter a__ : Tuple = model_dic.get("w_conv1" ) a__ : int = model_dic.get("wkj" ) a__ : List[str] = model_dic.get("vji" ) a__ : Optional[int] = model_dic.get("thre_conv1" ) a__ : Optional[int] = model_dic.get("thre_bp2" ) a__ : Optional[int] = model_dic.get("thre_bp3" ) return conv_ins def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): return 1 / (1 + np.exp(-1 * x )) def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): return round(lowerCamelCase__ , 3 ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any ): # convolution process a__ : Any = convs[0] a__ : str = convs[1] a__ : int = np.shape(lowerCamelCase__ )[0] # get the data slice of original image data, data_focus a__ : Optional[int] = [] for i_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase__ ): for j_focus in range(0 , size_data - size_conv + 1 , lowerCamelCase__ ): a__ : List[str] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(lowerCamelCase__ ) # calculate the feature map of every single 
kernel, and saved as list of matrix a__ : str = [] a__ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(lowerCamelCase__ ): a__ : Optional[Any] = [] for i_focus in range(len(lowerCamelCase__ ) ): a__ : Tuple = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(lowerCamelCase__ ) ) a__ : Optional[int] = np.asmatrix(lowerCamelCase__ ).reshape( lowerCamelCase__ , lowerCamelCase__ ) data_featuremap.append(lowerCamelCase__ ) # expanding the data slice to One dimenssion a__ : Union[str, Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(lowerCamelCase__ ) ) a__ : Union[str, Any] = np.asarray(lowerCamelCase__ ) return focus_list, data_featuremap def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int]="average_pool" ): # pooling process a__ : Union[str, Any] = len(featuremaps[0] ) a__ : Union[str, Any] = int(size_map / size_pooling ) a__ : List[str] = [] for i_map in range(len(lowerCamelCase__ ) ): a__ : str = featuremaps[i_map] a__ : Tuple = [] for i_focus in range(0 , lowerCamelCase__ , lowerCamelCase__ ): for j_focus in range(0 , lowerCamelCase__ , lowerCamelCase__ ): a__ : Any = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(lowerCamelCase__ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(lowerCamelCase__ ) ) a__ : List[str] = np.asmatrix(lowerCamelCase__ ).reshape(lowerCamelCase__ , lowerCamelCase__ ) featuremap_pooled.append(lowerCamelCase__ ) return featuremap_pooled def _UpperCamelCase( self : Any , lowerCamelCase__ : str ): # expanding three dimension data to one dimension list a__ : Optional[int] = [] for i in range(len(lowerCamelCase__ ) ): a__ : Dict = np.shape(data[i] ) a__ : Union[str, Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) a__ : List[Any] = data_listed.getA().tolist()[0] data_expanded.extend(lowerCamelCase__ ) a__ : Dict = np.asarray(lowerCamelCase__ ) return data_expanded def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Tuple ): # expanding matrix to one dimension list a__ : str = np.asarray(lowerCamelCase__ ) a__ : Optional[int] = np.shape(lowerCamelCase__ ) a__ : Union[str, Any] = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def _UpperCamelCase( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : str , lowerCamelCase__ : Any ): a__ : int = [] a__ : int = 0 for i_map in range(lowerCamelCase__ ): a__ : List[Any] = np.ones((size_map, size_map) ) for i in range(0 , lowerCamelCase__ , lowerCamelCase__ ): for j in range(0 , lowerCamelCase__ , lowerCamelCase__ ): a__ : Union[str, Any] = pd_pool[ i_pool ] a__ : str = i_pool + 1 a__ : Any = np.multiply( lowerCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(lowerCamelCase__ ) return pd_all def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]=bool ): # model traning print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(lowerCamelCase__ )) ) print((" - - Shape: Teach_Data ", np.shape(lowerCamelCase__ )) ) a__ : str = 0 a__ : List[str] = [] 
a__ : int = 10_000 while rp < n_repeat and mse >= error_accuracy: a__ : Optional[int] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(lowerCamelCase__ ) ): # print('------------Learning Image: %d--------------'%p) a__ : Optional[Any] = np.asmatrix(datas_train[p] ) a__ : str = np.asarray(datas_teach[p] ) a__, a__ : Dict = self.convolute( lowerCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) a__ : int = self.pooling(lowerCamelCase__ , self.size_poolinga ) a__ : Dict = np.shape(lowerCamelCase__ ) a__ : List[str] = self._expand(lowerCamelCase__ ) a__ : Dict = data_bp_input a__ : Dict = np.dot(lowerCamelCase__ , self.vji.T ) - self.thre_bpa a__ : Optional[int] = self.sig(lowerCamelCase__ ) a__ : int = np.dot(lowerCamelCase__ , self.wkj.T ) - self.thre_bpa a__ : int = self.sig(lowerCamelCase__ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- a__ : Tuple = np.multiply( (data_teach - bp_outa) , np.multiply(lowerCamelCase__ , (1 - bp_outa) ) ) a__ : Optional[Any] = np.multiply( np.dot(lowerCamelCase__ , self.wkj ) , np.multiply(lowerCamelCase__ , (1 - bp_outa) ) ) a__ : Optional[Any] = np.dot(lowerCamelCase__ , self.vji ) a__ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) a__ : Optional[Any] = pd_conva_pooled.T.getA().tolist() a__ : Dict = self._calculate_gradient_from_pool( lowerCamelCase__ , lowerCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): a__ : List[str] = self._expand_mat(pd_conva_all[k_conv] ) a__ : Any = self.rate_weight * np.dot(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) a__ : Any = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer a__ : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight a__ : str = self.vji + pd_j_all.T * bp_outa * self.rate_weight a__ : Optional[int] = self.thre_bpa - pd_k_all * self.rate_thre a__ : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image a__ : str = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) a__ : str = rp + 1 a__ : Tuple = error_count / patterns all_mse.append(lowerCamelCase__ ) def draw_error(): a__ : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(lowerCamelCase__ , "+-" ) plt.plot(lowerCamelCase__ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(lowerCamelCase__ , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCamelCase( self : Tuple , lowerCamelCase__ : int ): # model predict a__ : Optional[int] = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(lowerCamelCase__ )) ) for p in range(len(lowerCamelCase__ ) ): a__ : str = np.asmatrix(datas_test[p] ) a__, a__ : Tuple = self.convolute( lowerCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) a__ : Optional[int] = self.pooling(lowerCamelCase__ , self.size_poolinga ) a__ : Any = self._expand(lowerCamelCase__ ) a__ : Any = data_bp_input a__ : 
str = bp_outa * self.vji.T - self.thre_bpa a__ : Tuple = self.sig(lowerCamelCase__ ) a__ : Any = bp_outa * self.wkj.T - self.thre_bpa a__ : List[str] = self.sig(lowerCamelCase__ ) produce_out.extend(bp_outa.getA().tolist() ) a__ : List[str] = [list(map(self.do_round , lowerCamelCase__ ) ) for each in produce_out] return np.asarray(lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any ): # return the data of image after convoluting process so we can check it out a__ : List[Any] = np.asmatrix(lowerCamelCase__ ) a__, a__ : List[Any] = self.convolute( lowerCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) a__ : Any = self.pooling(lowerCamelCase__ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
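# A short usage sketch (not part of the file above; assumption: the class corresponds to
# RobertaTokenizerFast in transformers, and "roberta-base" is only an illustrative checkpoint).
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = tok("Hello world", "How are you?")
# build_inputs_with_special_tokens wraps the pair as <s> A </s></s> B </s>,
# and the token-type ids produced for RoBERTa are all zeros.
print(tok.convert_ids_to_tokens(enc["input_ids"]))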
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt""" ), """google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""", """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json""" ), """google/electra-base-generator""": ( """https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json""" ), """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : Optional[int] = { """google/electra-small-generator""": 512, """google/electra-base-generator""": 512, """google/electra-large-generator""": 512, """google/electra-small-discriminator""": 512, """google/electra-base-discriminator""": 512, """google/electra-large-discriminator""": 512, } UpperCamelCase : Any = { """google/electra-small-generator""": {"""do_lower_case""": True}, """google/electra-base-generator""": {"""do_lower_case""": True}, """google/electra-large-generator""": {"""do_lower_case""": True}, """google/electra-small-discriminator""": {"""do_lower_case""": True}, """google/electra-base-discriminator""": {"""do_lower_case""": True}, """google/electra-large-discriminator""": {"""do_lower_case""": True}, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_INIT_CONFIGURATION _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ElectraTokenizer def __init__( self : int , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Optional[Any]="[UNK]" , lowerCamelCase__ : List[Any]="[SEP]" , lowerCamelCase__ : List[Any]="[PAD]" , lowerCamelCase__ : str="[CLS]" , lowerCamelCase__ : List[Any]="[MASK]" , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : str , ): super().__init__( lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , 
pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , lowerCamelCase__ ) != do_lower_case or normalizer_state.get("strip_accents" , lowerCamelCase__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , lowerCamelCase__ ) != tokenize_chinese_chars ): a__ : Any = getattr(lowerCamelCase__ , normalizer_state.pop("type" ) ) a__ : Union[str, Any] = do_lower_case a__ : Optional[int] = strip_accents a__ : int = tokenize_chinese_chars a__ : Tuple = normalizer_class(**lowerCamelCase__ ) a__ : Union[str, Any] = do_lower_case def _UpperCamelCase( self : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict=None ): a__ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Optional[int] = [self.sep_token_id] a__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : Union[str, Any] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ )
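# A short usage sketch (not part of the file above; assumption: the class corresponds to
# ElectraTokenizerFast in transformers).
from transformers import ElectraTokenizerFast

tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
enc = tok("first sentence", "second sentence")
# the pair is laid out as [CLS] A [SEP] B [SEP]; token_type_ids mark the second segment with 1s
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])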
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale the data to the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale the data to zero mean and unit variance (z-score standardization)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
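# A minimal usage example for the two helpers above (the sample values are illustrative).
if __name__ == "__main__":
    data = [10, 20, 30, 40, 50]
    print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
    print(standardization(data))  # z-scores with mean 0 and (sample) standard deviation 1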
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) UpperCamelCase : int = { """tanreinama/GPTSAN-2.8B-spout_is_uniform""": ( """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json""" ), } class A__ ( A__ ): """simple docstring""" _lowercase = 'gptsan-japanese' _lowercase = [ 'past_key_values', ] _lowercase = { 'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : str , lowerCamelCase__ : Tuple=36_000 , lowerCamelCase__ : Optional[int]=1_280 , lowerCamelCase__ : Any=1_024 , lowerCamelCase__ : Dict=8_192 , lowerCamelCase__ : Any=4_096 , lowerCamelCase__ : Optional[int]=128 , lowerCamelCase__ : Optional[int]=10 , lowerCamelCase__ : List[str]=0 , lowerCamelCase__ : int=16 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : List[Any]=128 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : int=1E-5 , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : Any="float32" , lowerCamelCase__ : str=False , lowerCamelCase__ : Any=False , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : str=0.002 , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Union[str, Any]=35_998 , lowerCamelCase__ : List[str]=35_995 , lowerCamelCase__ : int=35_999 , **lowerCamelCase__ : int , ): a__ : Optional[Any] = vocab_size a__ : Tuple = max_position_embeddings a__ : str = d_model a__ : Tuple = d_ff a__ : Tuple = d_ext a__ : List[Any] = d_spout a__ : str = num_switch_layers a__ : List[Any] = num_ext_layers a__ : List[Any] = num_switch_layers + num_ext_layers a__ : Dict = num_heads a__ : List[str] = num_experts a__ : Optional[Any] = expert_capacity a__ : str = dropout_rate a__ : int = layer_norm_epsilon a__ : Any = router_bias a__ : Dict = router_jitter_noise a__ : Any = router_dtype a__ : Optional[Any] = router_ignore_padding_tokens a__ : Optional[Any] = output_hidden_states a__ : List[Any] = output_attentions a__ : Any = initializer_factor a__ : List[Any] = output_router_logits a__ : Dict = use_cache super().__init__( separator_token_id=lowerCamelCase__ , pad_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
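# A minimal sketch (not part of the file above; assumption: the class corresponds to
# GPTSanJapaneseConfig in transformers).
from transformers import GPTSanJapaneseConfig

config = GPTSanJapaneseConfig()            # defaults mirror the signature above
print(config.d_model, config.num_layers)   # num_layers = num_switch_layers + num_ext_layers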
def solution(length: int = 50) -> int:
    """Count the ways tiles of length 2, 3 and 4 can be placed in a row of `length`
    squares, summed over the three tile lengths (Project Euler-style tiling count)."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum the numbers below `limit` that are palindromic in base 10 and in base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
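# A hand-checkable example (separate from the file above): 585 is a palindrome in base 10
# and in base 2 (0b1001001001), so it is included in the sum.
assert is_palindrome(585)
assert is_palindrome(bin(585).split("b")[1])
print(solution(1_000))  # sum of all double-base palindromes below 1000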
class Things:
    """An item with a name, a value and a weight, used by the greedy selection below."""

    def __init__(self, name: str, value: float, weight: float):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self) -> float:
        return self.value

    def get_name(self) -> str:
        return self.name

    def get_weight(self) -> float:
        return self.weight

    def value_weight(self) -> float:
        return self.value / self.weight


def build_menu(name: list, value: list, weight: list) -> list:
    """Build a list of Things from parallel lists of names, values and weights."""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item: list, max_cost: float, key_func) -> tuple:
    """Greedily take items in descending `key_func` order while the total weight fits in `max_cost`."""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    # placeholder kept from the original file; doctest.testmod() below collects docstring tests
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
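# A quick demo of the greedy helper above (the food data is illustrative, not from the original file).
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]
foods = build_menu(food, value, weight)
# maximise value-per-weight greedily under a total weight budget of 60
chosen, total_value = greedy(foods, 60, Things.value_weight)
print(chosen, total_value)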
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : List[str] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) UpperCamelCase : Union[str, Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) 
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), 
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> Any: a__ : Union[str, Any] = state_dict.pop(__a ) a__ : List[str] = val def UpperCamelCase_ ( __a ) -> List[str]: a__ : Any = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: a__ : str = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) a__ : Tuple = value else: a__ : List[str] = value return new_state_dict def UpperCamelCase_ ( __a , __a=False ) -> Any: a__ : Tuple = "" if is_panoptic: a__ : List[Any] = "conditional_detr." 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) a__ : Optional[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) a__ : Any = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict a__ : Tuple = in_proj_weight[:256, :] a__ : Any = in_proj_bias[:256] a__ : Tuple = in_proj_weight[256:512, :] a__ : List[str] = in_proj_bias[256:512] a__ : Any = in_proj_weight[-256:, :] a__ : Optional[int] = in_proj_bias[-256:] def UpperCamelCase_ ( ) -> Tuple: a__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" a__ : List[Any] = Image.open(requests.get(__a , stream=__a ).raw ) return im @torch.no_grad() def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : Tuple = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: a__ : Tuple = "resnet101" if "dc5" in model_name: a__ : str = True a__ : int = "panoptic" in model_name if is_panoptic: a__ : List[str] = 250 else: a__ : str = 91 a__ : Union[str, Any] = "huggingface/label-files" a__ : int = "coco-detection-id2label.json" a__ : Optional[Any] = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) a__ : Any = {int(__a ): v for k, v in idalabel.items()} a__ : Union[str, Any] = idalabel a__ : Optional[Any] = {v: k for k, v in idalabel.items()} # load image processor a__ : Dict = "coco_panoptic" if is_panoptic else "coco_detection" a__ : int = ConditionalDetrImageProcessor(format=__a ) # prepare image a__ : Any = prepare_img() a__ : Dict = image_processor(images=__a , return_tensors="pt" ) a__ : List[Any] = encoding["pixel_values"] logger.info(f'''Converting model {model_name}...''' ) # load original model from torch hub a__ : Optional[Any] = torch.hub.load("DeppMeng/ConditionalDETR" , __a , pretrained=__a ).eval() a__ : List[str] = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: a__ : str = "conditional_detr." + src rename_key(__a , __a , __a ) a__ : Tuple = rename_backbone_keys(__a ) # query, key and value matrices need special treatment read_in_q_k_v(__a , is_panoptic=__a ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them a__ : str = "conditional_detr.model." if is_panoptic else "model." 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): a__ : Any = state_dict.pop(__a ) a__ : Optional[int] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: a__ : Dict = state_dict.pop(__a ) a__ : Tuple = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: a__ : Optional[Any] = state_dict.pop(__a ) a__ : Dict = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): a__ : str = state_dict.pop(__a ) a__ : Union[str, Any] = val # finally, create HuggingFace model and load state dict a__ : str = ConditionalDetrForSegmentation(__a ) if is_panoptic else ConditionalDetrForObjectDetection(__a ) model.load_state_dict(__a ) model.eval() model.push_to_hub(repo_id=__a , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion a__ : Dict = conditional_detr(__a ) a__ : int = model(__a ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1e-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1e-4 ) # Save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) image_processor.save_pretrained(__a ) if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) UpperCamelCase : Optional[int] = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
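# Typical invocation of the conversion script above (the file name is an assumption;
# the two flags come from the argparse definition at the end of the script):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50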
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ): super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ ) a__ : str = Sql( cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = None a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , ) # Build dataset for splits a__ : List[str] = self.builder.as_dataset( split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) a__ : Any = dataset a__ : str = name a__ : Tuple = con a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a__ : Any = num_proc a__ : Tuple = to_sql_kwargs def _UpperCamelCase( self : List[Any] ): a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ ) a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs ) return written def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): a__, a__, a__ : Union[str, Any] = args a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs a__ : Tuple = query_table( table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) a__ : str = batch.to_pandas() a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ ) return num_rows or len(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a__, a__ : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, 
index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
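# A usage sketch (not part of the file above; assumption: these classes back the
# Dataset.to_sql / Dataset.from_sql helpers in the datasets library).
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
con = sqlite3.connect("example.db")
ds.to_sql("my_table", con)                                                    # write through the writer class
loaded = Dataset.from_sql("SELECT * FROM my_table", "sqlite:///example.db")  # read back through the reader class
print(loaded)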
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[int] = { """vocab_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt""" ), """squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""", """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli""": ( """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : Tuple = { """squeezebert/squeezebert-uncased""": 512, """squeezebert/squeezebert-mnli""": 512, """squeezebert/squeezebert-mnli-headless""": 512, } UpperCamelCase : str = { """squeezebert/squeezebert-uncased""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True}, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_INIT_CONFIGURATION _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = SqueezeBertTokenizer def __init__( self : int , lowerCamelCase__ : str=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Dict="[UNK]" , lowerCamelCase__ : str="[SEP]" , lowerCamelCase__ : Tuple="[PAD]" , lowerCamelCase__ : Tuple="[CLS]" , lowerCamelCase__ : str="[MASK]" , lowerCamelCase__ : Any=True , lowerCamelCase__ : Any=None , **lowerCamelCase__ : Tuple , ): super().__init__( lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , lowerCamelCase__ ) != do_lower_case or normalizer_state.get("strip_accents" , lowerCamelCase__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , lowerCamelCase__ ) != tokenize_chinese_chars ): a__ : Union[str, Any] = getattr(lowerCamelCase__ , normalizer_state.pop("type" ) ) a__ : List[Any] = do_lower_case a__ : Optional[Any] = strip_accents a__ : str = tokenize_chinese_chars a__ : Any = normalizer_class(**lowerCamelCase__ ) a__ : Any = do_lower_case def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[Any]=None ): a__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCamelCase( self : List[Any] 
, lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[Any] = [self.sep_token_id] a__ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[Any] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ )
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """spiece.model"""} UpperCamelCase : int = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", } } # TODO(PVP) - this should be removed in Transformers v5 UpperCamelCase : Tuple = { """t5-small""": 512, """t5-base""": 512, """t5-large""": 512, """t5-3b""": 512, """t5-11b""": 512, } UpperCamelCase : Optional[Any] = """▁""" class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] def __init__( self : str , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Optional[int]="<unk>" , lowerCamelCase__ : Tuple="<pad>" , lowerCamelCase__ : str=100 , lowerCamelCase__ : int=None , lowerCamelCase__ : Optional[Dict[str, Any]] = None , lowerCamelCase__ : Optional[Any]=True , **lowerCamelCase__ : Tuple , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: a__ : Tuple = [f'''<extra_id_{i}>''' for i in range(lowerCamelCase__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens a__ : int = len(set(filter(lambda lowerCamelCase__ : bool("extra_id" in str(lowerCamelCase__ ) ) , lowerCamelCase__ ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids" " tokens" ) if legacy: logger.warning_once( f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to''' " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" ) a__ : Optional[Any] = legacy a__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , extra_ids=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : Any = vocab_file a__ : List[Any] = extra_ids a__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCamelCase__ ) @staticmethod def _UpperCamelCase( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: a__ : List[Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" f''' {pretrained_model_name_or_path} automatically truncating your input to''' f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences''' f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowerCamelCase__ , ) return max_model_length @property def _UpperCamelCase( self : List[Any] ): return self.sp_model.get_piece_size() + self._extra_ids def _UpperCamelCase( self : str ): a__ : Tuple = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(lowerCamelCase__ )) + [1] return ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1] def _UpperCamelCase( self : Union[str, Any] ): return list( set(filter(lambda lowerCamelCase__ : bool(re.search(r"<extra_id_\d+>" , lowerCamelCase__ ) ) is not None , self.additional_special_tokens ) ) ) def _UpperCamelCase( self : int ): return [self._convert_token_to_id(lowerCamelCase__ ) for token in self.get_sentinel_tokens()] def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : List[int] ): if len(lowerCamelCase__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated''' " eos tokens being added." 
) return token_ids else: return token_ids + [self.eos_token_id] def _UpperCamelCase( self : int , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : str = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _UpperCamelCase( self : str , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : int = self._add_eos_if_not_present(lowerCamelCase__ ) if token_ids_a is None: return token_ids_a else: a__ : str = self._add_eos_if_not_present(lowerCamelCase__ ) return token_ids_a + token_ids_a def __getstate__( self : Any ): a__ : Tuple = self.__dict__.copy() a__ : Optional[Any] = None return state def __setstate__( self : int , lowerCamelCase__ : Tuple ): a__ : int = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): a__ : int = {} a__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : "TextInput" , **lowerCamelCase__ : Union[str, Any] ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: a__ : Dict = SPIECE_UNDERLINE + text.replace(lowerCamelCase__ , " " ) return super().tokenize(lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : int , lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[int] ): if not self.legacy: a__ : Optional[Any] = text.startswith(lowerCamelCase__ ) if is_first: a__ : Optional[int] = text[1:] a__ : Dict = self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ ) if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(lowerCamelCase__ ): a__ : Optional[Any] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str ): if token.startswith("<extra_id_" ): a__ : int = re.match(r"<extra_id_(\d+)>" , lowerCamelCase__ ) a__ : Optional[int] = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): if index < self.sp_model.get_piece_size(): a__ : Union[str, Any] = self.sp_model.IdToPiece(lowerCamelCase__ ) else: a__ : Tuple = f'''<extra_id_{self.vocab_size - 1 - index}>''' return token def _UpperCamelCase( self : int , lowerCamelCase__ : str ): a__ : str = [] a__ : Optional[int] = "" a__ : int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase__ ) + token a__ : Dict = True a__ : Optional[int] = [] else: current_sub_tokens.append(lowerCamelCase__ ) a__ : int = False out_string += self.sp_model.decode(lowerCamelCase__ ) return out_string.strip() def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): if not os.path.isdir(lowerCamelCase__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return a__ : int = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase__ ) elif 
not os.path.isfile(self.vocab_file ): with open(lowerCamelCase__ , "wb" ) as fi: a__ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (out_vocab_file,)
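# Sketch (not part of the original file): the "<extra_id_N>" sentinel handling above
# maps sentinel tokens onto the ids just past the SentencePiece pieces, counting
# backwards from the end of the vocabulary. The sizes below are hypothetical (a
# 32_000-piece model plus 100 sentinels); the real tokenizer derives them from its
# loaded SentencePiece model and the extra_ids argument.
import re

SP_VOCAB_SIZE = 32_000                  # stands in for sp_model.get_piece_size()
EXTRA_IDS = 100                         # stands in for the extra_ids argument
VOCAB_SIZE = SP_VOCAB_SIZE + EXTRA_IDS  # mirrors the vocab_size property above

def sentinel_to_id(token: str) -> int:
    # mirrors `self.vocab_size - num - 1` in the token-to-id conversion above
    match = re.match(r"<extra_id_(\d+)>", token)
    assert match is not None, "not a sentinel token"
    return VOCAB_SIZE - int(match.group(1)) - 1

def id_to_sentinel(index: int) -> str:
    # mirrors the `<extra_id_{vocab_size - 1 - index}>` branch in the id-to-token conversion above
    assert index >= SP_VOCAB_SIZE, "id belongs to the SentencePiece model itself"
    return f"<extra_id_{VOCAB_SIZE - 1 - index}>"

assert sentinel_to_id("<extra_id_0>") == 32_099   # last id in the vocabulary
assert sentinel_to_id("<extra_id_99>") == 32_000  # first id after the sp pieces
assert id_to_sentinel(32_099) == "<extra_id_0>"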
37
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ): super().__init__() a__ : int = module a__ : Any = nn.Sequential( nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , ) a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ): return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'bigscience/bloom-1b7' # Constant values _lowercase = 2.1_09_65_95_52_69_25_74 _lowercase = 'Hello my name is' _lowercase = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _lowercase = 1_0 def _UpperCamelCase( self : Dict ): # Models and tokenizer a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): super().setUp() # Models and tokenizer a__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : List[Any] ): a__ : str = self.model_abit.config self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) ) a__ : Optional[Any] = config.to_dict() a__ : int = config.to_diff_dict() a__ : List[str] = config.to_json_string() def _UpperCamelCase( self : int ): from bitsandbytes.nn import Paramsabit a__ : List[Any] = self.model_fpaa.get_memory_footprint() a__ : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) a__ : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCamelCase( self : Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCamelCase( self : str ): a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = BitsAndBytesConfig() a__ : Optional[int] = True a__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" ) a__ : str = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : int = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : Dict ): with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = BitsAndBytesConfig() with self.assertRaises(lowerCamelCase__ ): a__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _UpperCamelCase( self : int ): with self.assertRaises(lowerCamelCase__ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : int = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Any = self.model_fpaa.to(torch.floataa ) a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to("cpu" ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.half() # Check this does not throw an error a__ : Dict = self.model_fpaa.float() def _UpperCamelCase( self : Dict ): a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCamelCase( cls : str ): a__ : Dict = "t5-small" a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense a__ : int = AutoTokenizer.from_pretrained(cls.model_name ) a__ : str = "Translate in German: Hello, my dog is cute" def _UpperCamelCase( self : Optional[int] ): gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Optional[int] ): from transformers import TaForConditionalGeneration a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Optional[Any] = None # test with `t5-small` a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Any = model.generate(**lowerCamelCase__ ) a__ : Union[str, Any] = modules def _UpperCamelCase( self : List[Any] ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Optional[int] = model.generate(**lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : List[str] ): super().setUp() # model_name a__ : Union[str, Any] = "bigscience/bloom-560m" a__ : Union[str, Any] = "t5-small" # Different types of model a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Sequence classification model a__ : Dict = AutoModelForSequenceClassification.from_pretrained( 
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # CausalLM model a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Seq2seq model a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): super().setUp() def _UpperCamelCase( self : int ): del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Tuple ): super().setUp() def _UpperCamelCase( self : List[Any] ): a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): a__ : Any = "facebook/opt-350m" super().setUp() def _UpperCamelCase( self : int ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability a__ : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCamelCase__ ) ): a__ : Dict = LoRALayer(module.q_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.k_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : Optional[Any] = model.forward(**lowerCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt2-xl' _lowercase = 3.31_91_85_48_54_15_21_87
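# Sketch (not from the original test file): the tests above drive 4-bit loading
# through mangled keyword names such as `load_in_abit`; below is what the
# equivalent call looks like with the public transformers/bitsandbytes API, to
# the best of my understanding. It assumes a recent transformers + bitsandbytes
# install and a CUDA GPU; "bigscience/bloom-560m" is only an example checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # store linear weights in 4-bit blocks
    bnb_4bit_quant_type="nf4",             # NormalFloat4, as exercised in the tests above
    bnb_4bit_compute_dtype=torch.float16,  # dtype used for the actual matmuls
)

model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-560m",
    quantization_config=quant_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")

inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))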
37
1
import datasets from .evaluate import evaluate UpperCamelCase : Any = """\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } """ UpperCamelCase : str = """ This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. """ UpperCamelCase : List[str] = """ Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair as given in the references (see below) - 'prediction_text': the text of the answer references: List of question-answers dictionaries with the following key-values: - 'id': id of the question-answer pair (see above), - 'answers': a Dict in the SQuAD dataset format { 'text': list of possible texts for the answer, as a list of strings 'answer_start': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: 'exact_match': Exact match (the normalized answer exactly match the gold answer) 'f1': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}] >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] >>> squad_metric = datasets.load_metric(\"squad\") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {'exact_match': 100.0, 'f1': 100.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): """simple docstring""" def _UpperCamelCase( self : List[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )}, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : int ): a__ : Any = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} a__ : Optional[int] = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] a__ : Any = evaluate(dataset=lowerCamelCase__ , predictions=lowerCamelCase__ ) return score
37
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
37
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def UpperCamelCase_ ( ) -> Any: a__ : str = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=__a ) a__ : Optional[Any] = parser.add_subparsers(help="accelerate command helpers" ) # Register commands get_config_parser(subparsers=__a ) env_command_parser(subparsers=__a ) launch_command_parser(subparsers=__a ) tpu_command_parser(subparsers=__a ) test_command_parser(subparsers=__a ) # Let's go a__ : List[Any] = parser.parse_args() if not hasattr(__a , "func" ): parser.print_help() exit(1 ) # Run args.func(__a ) if __name__ == "__main__": main()
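# Sketch (not part of the original file): the CLI above follows the standard
# argparse sub-command pattern -- each sub-command parser attaches a handler and
# the entry point dispatches on `args.func`. A minimal self-contained version of
# that pattern with a made-up sub-command:
from argparse import ArgumentParser

def make_parser() -> ArgumentParser:
    parser = ArgumentParser("tool", usage="tool <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="command helpers")

    hello = subparsers.add_parser("hello")
    hello.add_argument("--name", default="world")
    hello.set_defaults(func=lambda args: print(f"hello {args.name}"))
    return parser

def run() -> None:
    parser = make_parser()
    args = parser.parse_args()
    if not hasattr(args, "func"):  # no sub-command given
        parser.print_help()
        raise SystemExit(1)
    args.func(args)

if __name__ == "__main__":
    run()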
37
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Tuple = R"\w+[.]\d+" a__ : List[Any] = re.findall(__a , __a ) for pat in pats: a__ : Union[str, Any] = key.replace(__a , "_".join(pat.split("." ) ) ) return key def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : List[str] = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): a__ : Any = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: a__ : Optional[Any] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: a__ : Union[str, Any] = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer a__ : List[str] = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: a__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer a__ : Tuple = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": a__ : Tuple = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight a__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias a__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def UpperCamelCase_ ( __a , __a , __a=42 ) -> str: # Step 1: Convert pytorch tensor to numpy a__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params a__ : Tuple = flax_model.init_weights(PRNGKey(__a ) ) a__ : Optional[Any] = flatten_dict(__a ) a__ : Union[str, Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): a__ : Optional[int] = rename_key(__a ) a__ : Optional[int] = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters a__, a__ : Union[str, Any] = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown a__ : str = jnp.asarray(__a ) return unflatten_dict(__a )
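# Sketch (not part of the original file): the two tensor reshapes applied above
# when porting PyTorch weights to the Flax layout, demonstrated with plain numpy
# on illustrative shapes.
import numpy as np

# PyTorch nn.Linear keeps its weight as (out_features, in_features); a Flax
# Dense "kernel" is (in_features, out_features), hence the `.T` above.
pt_linear_weight = np.zeros((128, 64))
flax_dense_kernel = pt_linear_weight.T
assert flax_dense_kernel.shape == (64, 128)

# PyTorch nn.Conv2d keeps its weight as (out_ch, in_ch, kh, kw); a Flax Conv
# kernel is (kh, kw, in_ch, out_ch), hence the transpose(2, 3, 1, 0) above.
pt_conv_weight = np.zeros((32, 3, 5, 5))
flax_conv_kernel = pt_conv_weight.transpose(2, 3, 1, 0)
assert flax_conv_kernel.shape == (5, 5, 3, 32)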
37
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : Optional[Any] = { """configuration_clipseg""": [ """CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CLIPSegConfig""", """CLIPSegTextConfig""", """CLIPSegVisionConfig""", ], """processing_clipseg""": ["""CLIPSegProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = [ """CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""", """CLIPSegModel""", """CLIPSegPreTrainedModel""", """CLIPSegTextModel""", """CLIPSegVisionModel""", """CLIPSegForImageSegmentation""", ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def UpperCamelCase_ ( ) -> int: a__ : Any = HfArgumentParser(__a ) a__ : Any = parser.parse_args_into_dataclasses()[0] a__ : Optional[int] = TensorFlowBenchmark(args=__a ) try: a__ : Optional[int] = parser.parse_args_into_dataclasses()[0] except ValueError as e: a__ : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead." a__ : List[Any] = " ".join(str(__a ).split(" " )[:-1] ) a__ : str = "" a__ : List[Any] = eval(str(__a ).split(" " )[-1] ) a__ : List[str] = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__a ) if len(__a ) > 0: a__ : Tuple = full_error_msg + begin_error_msg + str(__a ) raise ValueError(__a ) benchmark.run() if __name__ == "__main__": main()
37
1
from __future__ import annotations from collections import deque class A__ : """simple docstring""" def __init__( self : Any , lowerCamelCase__ : list[str] ): a__ : list[dict] = [] self.adlist.append( {"value": "", "next_states": [], "fail_state": 0, "output": []} ) for keyword in keywords: self.add_keyword(lowerCamelCase__ ) self.set_fail_transitions() def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : str ): for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def _UpperCamelCase( self : Any , lowerCamelCase__ : str ): a__ : List[str] = 0 for character in keyword: a__ : Tuple = self.find_next_state(lowerCamelCase__ , lowerCamelCase__ ) if next_state is None: self.adlist.append( { "value": character, "next_states": [], "fail_state": 0, "output": [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) a__ : Union[str, Any] = len(self.adlist ) - 1 else: a__ : List[str] = next_state self.adlist[current_state]["output"].append(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : deque = deque() for node in self.adlist[0]["next_states"]: q.append(lowerCamelCase__ ) a__ : Tuple = 0 while q: a__ : str = q.popleft() for child in self.adlist[r]["next_states"]: q.append(lowerCamelCase__ ) a__ : Tuple = self.adlist[r]["fail_state"] while ( self.find_next_state(lowerCamelCase__ , self.adlist[child]["value"] ) is None and state != 0 ): a__ : List[Any] = self.adlist[state]["fail_state"] a__ : Optional[int] = self.find_next_state( lowerCamelCase__ , self.adlist[child]["value"] ) if self.adlist[child]["fail_state"] is None: a__ : Dict = 0 a__ : Union[str, Any] = ( self.adlist[child]["output"] + self.adlist[self.adlist[child]["fail_state"]]["output"] ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str ): a__ : dict = {} # returns a dict with keywords and list of its occurrences a__ : Tuple = 0 for i in range(len(lowerCamelCase__ ) ): while ( self.find_next_state(lowerCamelCase__ , string[i] ) is None and current_state != 0 ): a__ : Union[str, Any] = self.adlist[current_state]["fail_state"] a__ : Optional[Any] = self.find_next_state(lowerCamelCase__ , string[i] ) if next_state is None: a__ : str = 0 else: a__ : int = next_state for key in self.adlist[current_state]["output"]: if key not in result: a__ : Optional[Any] = [] result[key].append(i - len(lowerCamelCase__ ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
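# Sketch (not part of the original file): the search method above is meant to
# return, for a query string, a dict mapping each matched keyword to the list of
# start offsets at which it occurs (offset = i - len(keyword) + 1). Since the
# identifiers in this dump are machine-mangled, here is a tiny brute-force
# reference with the same contract, useful for sanity-checking the automaton:
from __future__ import annotations

def naive_multi_search(keywords: list[str], text: str) -> dict[str, list[int]]:
    result: dict[str, list[int]] = {}
    for keyword in keywords:
        start = text.find(keyword)
        while start != -1:
            result.setdefault(keyword, []).append(start)
            start = text.find(keyword, start + 1)
    return result

assert naive_multi_search(["he", "she", "his", "hers"], "ahishers") == {
    "his": [1],
    "she": [3],
    "he": [4],
    "hers": [4],
}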
37
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase : Optional[int] = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCamelCase_ ( __a ) -> Any: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCamelCase_ ( __a , __a , __a ) -> Any: return max(metric_fn(__a , __a ) for gt in ground_truths ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [] if args.gold_data_mode == "qa": a__ : Any = pd.read_csv(__a , sep="\t" , header=__a ) for answer_list in data[1]: a__ : Union[str, Any] = ast.literal_eval(__a ) answers.append(__a ) else: a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()] a__ : List[str] = [[reference] for reference in references] a__ : List[str] = 0 for prediction, ground_truths in zip(__a , __a ): total += 1 em += metric_max_over_ground_truths(__a , __a , __a ) fa += metric_max_over_ground_truths(__a , __a , __a ) a__ : Dict = 100.0 * em / total a__ : Optional[Any] = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = args.k a__ : str = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = 0 for hypo, reference in zip(__a , __a ): a__ : Any = set(hypo.split("\t" )[:k] ) a__ : Union[str, Any] = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a__ : Union[str, Any] = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: def strip_title(__a ): if title.startswith("\"" ): a__ : Optional[Any] = title[1:] if title.endswith("\"" ): a__ : Union[str, Any] = title[:-1] return title a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device ) a__ : Optional[int] = rag_model.rag.question_encoder(__a ) a__ : Union[str, Any] = question_enc_outputs[0] a__ : Optional[int] = rag_model.retriever( __a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a__ : int = [] for docs in all_docs: a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]] provenance_strings.append("\t".join(__a ) ) return provenance_strings def UpperCamelCase_ ( __a , __a , __a ) -> Dict: with torch.no_grad(): a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a ) a__ : Any = inputs_dict.input_ids.to(args.device ) a__ : Dict = inputs_dict.attention_mask.to(args.device ) a__ : Optional[int] = rag_model.generate( # 
rag_model overwrites generate __a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a ) if args.print_predictions: for q, a in zip(__a , __a ): logger.info("Q: {} - A: {}".format(__a , __a ) ) return answers def UpperCamelCase_ ( ) -> List[str]: a__ : int = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=__a , type=__a , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) a__ : int = parser.parse_args() a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Tuple = {} if args.model_type is None: a__ : List[str] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a__ : Tuple = args.n_docs if args.index_name is not None: a__ : Any = args.index_name if args.index_path is not None: a__ : int = args.index_path else: a__ : Optional[Any] = BartForConditionalGeneration a__ : Tuple = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , __a ) a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(__a , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(__a ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a__ : str = RagRetriever.from_pretrained(__a , **__a ) a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a ) model.retriever.init_retrieval() else: a__ : Dict = model_class.from_pretrained(__a , **__a ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a__ : List[Any] = [] for line in tqdm(__a ): questions.append(line.strip() ) if len(__a ) == args.eval_batch_size: a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) + "\n" ) preds_file.flush() a__ : Any = [] if len(__a ) > 0: a__ : List[str] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) ) preds_file.flush() score_fn(__a , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase : List[Any] = get_args() main(args)
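# Sketch (not part of the original script): scoring takes, for each prediction,
# the best value of the metric over all acceptable gold answers
# (metric_max_over_ground_truths above). The real exact_match_score / f1_score
# come from utils_rag and are not shown here, so this standalone sketch uses a
# deliberately simple normalized exact-match metric in their place:
import re
import string

def normalize(text: str) -> str:
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())

def exact_match(prediction: str, ground_truth: str) -> float:
    return float(normalize(prediction) == normalize(ground_truth))

def max_over_ground_truths(metric, prediction: str, ground_truths: list) -> float:
    return max(metric(prediction, gt) for gt in ground_truths)

assert max_over_ground_truths(exact_match, "The Eiffel Tower", ["eiffel tower", "Paris"]) == 1.0
assert max_over_ground_truths(exact_match, "London", ["eiffel tower", "Paris"]) == 0.0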
37
1
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class A__ : """simple docstring""" _lowercase = 42 _lowercase = None _lowercase = None UpperCamelCase : Union[str, Any] = namedtuple("""CoinsDistribResult""", """moves excess""") def UpperCamelCase_ ( __a ) -> int: if root is None: return 0 # Validation def count_nodes(__a ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(__a ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(__a ) != count_coins(__a ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(__a ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) a__, a__ : Optional[Any] = get_distrib(node.left ) a__, a__ : List[Any] = get_distrib(node.right ) a__ : str = 1 - left_distrib_excess a__ : Dict = 1 - right_distrib_excess a__ : Any = ( left_distrib_moves + right_distrib_moves + abs(__a ) + abs(__a ) ) a__ : str = node.data - coins_to_left - coins_to_right return CoinsDistribResult(__a , __a ) return get_distrib(__a )[0] if __name__ == "__main__": import doctest doctest.testmod()
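# Sketch (not part of the original file): the same moves/excess recursion with a
# readable, hypothetical node class, worked on the tree [0, 3, 0]. The left child
# sends two coins up (two moves) and the root forwards one to the right child
# (one move), so three moves in total.
from __future__ import annotations
from dataclasses import dataclass

@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None

def distribute_coins(root: Node | None) -> int:
    def walk(node: Node | None) -> tuple[int, int]:  # returns (moves, excess)
        if node is None:
            return 0, 1
        left_moves, left_excess = walk(node.left)
        right_moves, right_excess = walk(node.right)
        to_left, to_right = 1 - left_excess, 1 - right_excess
        moves = left_moves + right_moves + abs(to_left) + abs(to_right)
        return moves, node.data - to_left - to_right

    return walk(root)[0]

assert distribute_coins(Node(0, Node(3), Node(0))) == 3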
37
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
37
1
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class A__ ( A__ , unittest.TestCase ):
    """simple docstring"""

    _lowercase = TransfoXLTokenizer
    _lowercase = False
    _lowercase = False

    def _UpperCamelCase( self : Union[str, Any] ):
        super().setUp()
        a__ : Optional[int] = [
            "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ",", "low", "l",
        ]
        a__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def _UpperCamelCase( self : Optional[int] , **lowerCamelCase__ : Dict ):
        a__ : Tuple = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )

    def _UpperCamelCase( self : int , lowerCamelCase__ : Tuple ):
        a__ : Tuple = "<unk> UNwanted , running"
        a__ : List[str] = "<unk> unwanted, running"
        return input_text, output_text

    def _UpperCamelCase( self : str ):
        a__ : Union[str, Any] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowerCamelCase__ )
        a__ : Dict = tokenizer.tokenize("<unk> UNwanted , running" )
        self.assertListEqual(lowerCamelCase__ , ["<unk>", "unwanted", ",", "running"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [0, 4, 8, 7] )

    def _UpperCamelCase( self : List[Any] ):
        a__ : Union[str, Any] = TransfoXLTokenizer(lower_case=lowerCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] )

    def _UpperCamelCase( self : List[str] ):
        a__ : str = TransfoXLTokenizer(lower_case=lowerCamelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def _UpperCamelCase( self : Optional[int] ):
        a__ : Dict = TransfoXLTokenizer(lower_case=lowerCamelCase__ )
        a__ : Dict = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        a__ : Tuple = [
            "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled", "[", "and", "]",
            "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", "m", ".",
            "What", "'s", "up", "!", "?",
        ]
        self.assertListEqual(tokenizer.tokenize(lowerCamelCase__ ) , lowerCamelCase__ )
        self.assertEqual(tokenizer.convert_tokens_to_string(lowerCamelCase__ ) , lowerCamelCase__ )

    def _UpperCamelCase( self : Optional[Any] ):
        a__ : List[Any] = self.get_tokenizer()
        a__ : Union[str, Any] = len(lowerCamelCase__ )
        tokenizer.add_tokens(["new1", "new2"] )
        tokenizer.move_added_token("new1" , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(lowerCamelCase__ ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1" ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , "new1" )
37
import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase : Union[str, Any] = None def UpperCamelCase_ ( ) -> List[str]: a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Dict = bool(qa["answers"]["text"] ) return qid_to_has_ans def UpperCamelCase_ ( __a ) -> List[Any]: def remove_articles(__a ): return ARTICLES_REGEX.sub(" " , __a ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): a__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) ) def UpperCamelCase_ ( __a ) -> Dict: if not s: return [] return normalize_answer(__a ).split() def UpperCamelCase_ ( __a , __a ) -> str: return int(normalize_answer(__a ) == normalize_answer(__a ) ) def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : int = get_tokens(__a ) a__ : Optional[Any] = get_tokens(__a ) a__ : Any = collections.Counter(__a ) & collections.Counter(__a ) a__ : Dict = sum(common.values() ) if len(__a ) == 0 or len(__a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : Tuple = 1.0 * num_same / len(__a ) a__ : str = 1.0 * num_same / len(__a ) a__ : str = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase_ ( __a , __a ) -> int: a__ : List[str] = {} a__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : List[Any] = qa["id"] a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : Tuple = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue a__ : Tuple = preds[qid] # Take max over all gold answers a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers ) a__ : str = max(compute_fa(__a , __a ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: a__ : Optional[Any] = {} for qid, s in scores.items(): a__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: a__ : Dict = float(not qid_to_has_ans[qid] ) else: a__ : Optional[Any] = s return new_scores def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple: if not 
qid_list: a__ : Union[str, Any] = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: a__ : int = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: for k in new_eval: a__ : Optional[Any] = new_eval[k] def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: plt.step(__a , __a , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__a ) plt.savefig(__a ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict: a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) a__ : Any = 0.0 a__ : Optional[int] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Tuple = [0.0] a__ : List[str] = 0.0 for i, qid in enumerate(__a ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Any = true_pos / float(i + 1 ) a__ : int = true_pos / float(__a ) if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__a ) recalls.append(__a ) if out_image: plot_pr_curve(__a , __a , __a , __a ) return {"ap": 100.0 * avg_prec} def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str: if out_image_dir and not os.path.exists(__a ): os.makedirs(__a ) a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)" , ) merge_eval(__a , __a , "pr_exact" ) merge_eval(__a , __a , "pr_f1" ) merge_eval(__a , __a , "pr_oracle" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: if not qid_list: return a__ : Optional[Any] = [na_probs[k] for k in qid_list] a__ : str = np.ones_like(__a ) / float(len(__a ) ) plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]: a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : Optional[Any] = num_no_ans a__ : Dict = cur_score a__ : Any = 0.0 a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) for i, qid in enumerate(__a ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : Optional[int] = scores[qid] else: if preds[qid]: a__ : str = -1 else: a__ : Union[str, Any] = 0 cur_score += diff if cur_score > best_score: a__ : Any = cur_score a__ : Dict = na_probs[qid] return 100.0 * best_score / len(__a ), best_thresh def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any: a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__ : Any = best_exact a__ : Any = exact_thresh a__ : List[Any] = best_fa a__ : Optional[int] = fa_thresh def UpperCamelCase_ ( ) -> Tuple: with open(OPTS.data_file ) as f: a__ : List[Any] = json.load(__a ) a__ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: a__ : int = json.load(__a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : List[str] = json.load(__a ) else: a__ : Optional[int] = {k: 0.0 for k in preds} a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v] a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] a__, a__ : str = get_raw_scores(__a , __a ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : Tuple = make_eval_dict(__a , __a ) if has_ans_qids: a__ : str = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "HasAns" ) if no_ans_qids: a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__a , __a , __a , __a , __a , __a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__a , __a ) else: print(json.dumps(__a , indent=2 ) ) if __name__ == "__main__": UpperCamelCase : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
37
1
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
37
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CLIPTokenizer _lowercase = CLIPTokenizerFast _lowercase = True _lowercase = {} _lowercase = False def _UpperCamelCase( self : List[Any] ): super().setUp() # fmt: off a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a__ : Optional[Any] = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : int = "lower newer" a__ : Optional[int] = "lower newer" return input_text, output_text def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = "lower newer" a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = tokens + [tokenizer.unk_token] a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) @require_ftfy def _UpperCamelCase( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y" a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of space type a__ : str = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of line break type a__ : Union[str, Any] = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}''' a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) a__ : Optional[Any] = f''' {text}''' a__ : str = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) def _UpperCamelCase( self : int ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(lowerCamelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def _UpperCamelCase( self : int ): super().test_tokenization_python_rust_equals() def _UpperCamelCase( self : str ): # CLIP always lower cases letters pass
37
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
37
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase : List[Any] = """======= >>>>>>> """ UpperCamelCase : Optional[Any] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase : Any = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def UpperCamelCase_ ( __a ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class A__ ( A__ ): """simple docstring""" @staticmethod def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ): a__ : List[str] = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ): a__ : str = get_logger("datasets-cli/converting" ) a__ : Optional[Any] = tfds_path a__ : Optional[int] = datasets_directory def _UpperCamelCase( self : int ): if os.path.isdir(self._tfds_path ): a__ : List[str] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): a__ : Any = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) a__ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) a__ : Tuple = [] a__ : str = [] a__ : List[Any] = {} if os.path.isdir(self._tfds_path ): a__ : List[str] = os.listdir(lowerCamelCase__ ) else: a__ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(lowerCamelCase__ , encoding="utf-8" ) as f: a__ : List[Any] = f.readlines() a__ : Union[str, Any] = [] a__ : Union[str, Any] = False a__ : Union[str, Any] = False a__ : Dict = [] for line in lines: a__ : Optional[Any] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: a__ : List[Any] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here a__ : List[str] = "" continue elif "from absl import logging" in out_line: a__ : Dict = "from datasets import logging\n" elif "getLogger" in out_line: a__ : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): a__ : List[str] = True a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) a__ : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: a__ : Optional[int] = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset a__ : Dict = f_name.replace(".py" , "" ) a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: a__ : Any = os.path.basename(lowerCamelCase__ ) a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ , lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
37
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=7 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Dict=30 , lowerCamelCase__ : Optional[int]=400 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[str]=[0.5, 0.5, 0.5] , lowerCamelCase__ : Optional[int]=[0.5, 0.5, 0.5] , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : List[str]=1 / 255 , lowerCamelCase__ : str=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p a__ : List[str] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333} a__ : Optional[Any] = parent a__ : Union[str, Any] = batch_size a__ : Tuple = num_channels a__ : Union[str, Any] = min_resolution a__ : Union[str, Any] = max_resolution a__ : Union[str, Any] = do_resize a__ : Tuple = size a__ : str = do_normalize a__ : Optional[int] = image_mean a__ : Optional[Any] = image_std a__ : str = do_rescale a__ : Dict = rescale_factor a__ : int = do_pad def _UpperCamelCase( self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _UpperCamelCase( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple=False ): if not batched: a__ : str = image_inputs[0] if isinstance(lowerCamelCase__ , Image.Image ): a__, a__ : Tuple = image.size else: a__, a__ : int = image.shape[1], image.shape[2] if w < h: a__ : str = int(self.size["shortest_edge"] * h / w ) a__ : int = self.size["shortest_edge"] elif w > h: a__ : int = self.size["shortest_edge"] a__ : Dict = int(self.size["shortest_edge"] * w / h ) else: a__ : Dict = self.size["shortest_edge"] a__ : Tuple = self.size["shortest_edge"] else: a__ : Union[str, Any] = [] for image in image_inputs: a__, a__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) a__ : int = max(lowerCamelCase__ , key=lambda lowerCamelCase__ : item[0] )[0] a__ : str = max(lowerCamelCase__ , key=lambda lowerCamelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = ConditionalDetrImageProcessor if is_vision_available() else None def _UpperCamelCase( self : Optional[int] ): a__ : Tuple = ConditionalDetrImageProcessingTester(self ) @property def _UpperCamelCase( self : Dict ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , "image_mean" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "image_std" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_resize" ) ) 
self.assertTrue(hasattr(lowerCamelCase__ , "size" ) ) def _UpperCamelCase( self : Tuple ): a__ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} ) self.assertEqual(image_processor.do_pad , lowerCamelCase__ ) a__ : str = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase__ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , lowerCamelCase__ ) def _UpperCamelCase( self : Any ): pass def _UpperCamelCase( self : Optional[Any] ): # Initialize image_processing a__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input a__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a__, a__ : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a__, a__ : Dict = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ ) a__ : Union[str, Any] = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _UpperCamelCase( self : str ): # Initialize image_processing a__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input a__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a__, a__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched a__ : Any = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values a__, a__ : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _UpperCamelCase( self : Optional[Any] ): # Initialize image_processing a__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input a__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values a__, a__ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # 
Test batched a__ : Tuple = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values a__, a__ : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _UpperCamelCase( self : Tuple ): # prepare image and target a__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: a__ : Tuple = json.loads(f.read() ) a__ : Any = {"image_id": 39_769, "annotations": target} # encode them a__ : List[str] = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" ) a__ : str = image_processing(images=lowerCamelCase__ , annotations=lowerCamelCase__ , return_tensors="pt" ) # verify pixel values a__ : Optional[Any] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase__ ) a__ : int = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase__ , atol=1E-4 ) ) # verify area a__ : List[Any] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase__ ) ) # verify boxes a__ : Any = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase__ ) a__ : int = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase__ , atol=1E-3 ) ) # verify image_id a__ : Any = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase__ ) ) # verify is_crowd a__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase__ ) ) # verify class_labels a__ : int = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase__ ) ) # verify orig_size a__ : Union[str, Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase__ ) ) # verify size a__ : List[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Optional[int] ): # prepare image, target and masks_path a__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: a__ : int = json.loads(f.read() ) a__ : Any = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target} a__ : Dict = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them a__ : str = ConditionalDetrImageProcessor(format="coco_panoptic" ) a__ : Optional[int] = image_processing(images=lowerCamelCase__ , annotations=lowerCamelCase__ , masks_path=lowerCamelCase__ , return_tensors="pt" ) # verify pixel values a__ : int = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase__ ) a__ : Dict = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase__ , atol=1E-4 ) ) # verify area a__ : Optional[int] = 
torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase__ ) ) # verify boxes a__ : int = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase__ ) a__ : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase__ , atol=1E-3 ) ) # verify image_id a__ : Dict = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase__ ) ) # verify is_crowd a__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase__ ) ) # verify class_labels a__ : str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase__ ) ) # verify masks a__ : Union[str, Any] = 822_873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase__ ) # verify orig_size a__ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase__ ) ) # verify size a__ : Dict = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase__ ) )
37
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Any = self.file.__enter__ class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = file_ def __enter__( self : str ): self._file.__enter__() return self def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ): self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ ) def __iter__( self : List[str] ): return iter(self._file ) def _UpperCamelCase( self : Any ): return next(self._file ) def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ): return getattr(self._file , lowerCamelCase__ ) def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ): return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) ) a__ : Any = fixed_enter
37
1
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right UpperCamelCase : Optional[Any] = 25_0004 UpperCamelCase : Dict = 25_0020 @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = MBartaaTokenizer _lowercase = MBartaaTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : str ): super().setUp() # We have a SentencePiece fixture for testing a__ : Dict = MBartaaTokenizer(lowerCamelCase__ , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : Any ): a__ : List[str] = "<s>" a__ : Union[str, Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "<mask>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_054 ) def _UpperCamelCase( self : Union[str, Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1_054 ) def _UpperCamelCase( self : Dict ): a__ : str = MBartaaTokenizer(lowerCamelCase__ , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=lowerCamelCase__ ) a__ : Any = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Tuple = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) a__ : str = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def _UpperCamelCase( self : Optional[int] ): # fmt: off a__ : Any = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , ) def _UpperCamelCase( self : Union[str, Any] ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return a__ : str = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Optional[Any] = tempfile.mkdtemp() a__ : Tuple = tokenizer_r.save_pretrained(lowerCamelCase__ ) a__ : List[str] = tokenizer_p.save_pretrained(lowerCamelCase__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) a__ : List[Any] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ ) # Checks everything loads correctly in the same way a__ : str = tokenizer_r.from_pretrained(lowerCamelCase__ ) a__ : int = tokenizer_p.from_pretrained(lowerCamelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCamelCase__ ) # Save tokenizer rust, legacy_format=True a__ : int = tempfile.mkdtemp() a__ : List[str] = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ ) a__ : Dict = tokenizer_p.save_pretrained(lowerCamelCase__ ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase__ , lowerCamelCase__ ) # Checks everything loads correctly in the same way a__ : Union[str, Any] = tokenizer_r.from_pretrained(lowerCamelCase__ ) a__ : Optional[int] = tokenizer_p.from_pretrained(lowerCamelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) ) shutil.rmtree(lowerCamelCase__ ) # Save tokenizer rust, legacy_format=False a__ : Tuple = tempfile.mkdtemp() a__ : List[str] = tokenizer_r.save_pretrained(lowerCamelCase__ , legacy_format=lowerCamelCase__ ) a__ : Any = tokenizer_p.save_pretrained(lowerCamelCase__ ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way a__ : Optional[int] = tokenizer_r.from_pretrained(lowerCamelCase__ ) a__ : Dict = tokenizer_p.from_pretrained(lowerCamelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase__ , lowerCamelCase__ ) ) shutil.rmtree(lowerCamelCase__ ) @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'facebook/mbart-large-50-one-to-many-mmt' _lowercase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s 
stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _lowercase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _lowercase = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2] @classmethod def _UpperCamelCase( cls : List[Any] ): a__ : MBartaaTokenizer = MBartaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" ) a__ : List[Any] = 1 return cls def _UpperCamelCase( self : Dict ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250_004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250_020 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250_038 ) def _UpperCamelCase( self : int ): a__ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): self.assertIn(lowerCamelCase__ , self.tokenizer.all_special_ids ) a__ : Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2] a__ : Any = self.tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase__ ) def _UpperCamelCase( self : str ): a__ : Union[str, Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , lowerCamelCase__ ) a__ : Union[str, Any] = 10 a__ : List[Any] = self.tokenizer(lowerCamelCase__ , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ ).input_ids[0] self.assertEqual(ids[0] , lowerCamelCase__ ) self.assertEqual(ids[-1] , 2 ) self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Tuple ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250_053, 250_001] ) def _UpperCamelCase( self : str ): a__ : Tuple = tempfile.mkdtemp() a__ : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase__ ) a__ : Any = MBartaaTokenizer.from_pretrained(lowerCamelCase__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase__ ) @require_torch def _UpperCamelCase( self : List[Any] ): a__ : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , return_tensors="pt" ) a__ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == RO_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE] @require_torch def _UpperCamelCase( self : Optional[int] ): a__ : Optional[Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCamelCase__ , 
truncation=lowerCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) a__ : Union[str, Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) a__ : Optional[int] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase__ ) self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def _UpperCamelCase( self : List[Any] ): a__ : int = self.tokenizer(self.src_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=3 , return_tensors="pt" ) a__ : Tuple = self.tokenizer( text_target=self.tgt_text , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=10 , return_tensors="pt" ) a__ : str = targets["input_ids"] a__ : Dict = shift_tokens_right(lowerCamelCase__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _UpperCamelCase( self : Optional[Any] ): a__ : List[Any] = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" ) self.assertEqual( nested_simplify(lowerCamelCase__ ) , { # en_XX, A, test, EOS "input_ids": [[250_004, 62, 3_034, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250_001, } , )
37
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") a__ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__a ): os.makedirs(__a ) a__ : Any = model.state_dict() def to_tf_var_name(__a ): for patt, repl in iter(__a ): a__ : Tuple = name.replace(__a , __a ) return f'''bert/{name}''' def create_tf_var(__a , __a , __a ): a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype ) a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: a__ : int = to_tf_var_name(__a ) a__ : Union[str, Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): a__ : int = torch_tensor.T a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a ) tf.keras.backend.set_value(__a , __a ) a__ : int = session.run(__a ) print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' ) a__ : Any = tf.train.Saver(tf.trainable_variables() ) saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) ) def UpperCamelCase_ ( __a=None ) -> int: a__ : Dict = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" ) a__ : Optional[Any] = parser.parse_args(__a ) a__ : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
37
1
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # prune branches that already exceed max_sum or can no longer reach it
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
37
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Dict = patch_size a__ : List[Any] = max_length a__ : str = num_mel_bins a__ : Optional[Any] = is_training a__ : Optional[int] = use_labels a__ : List[Any] = hidden_size a__ : str = num_hidden_layers a__ : Any = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Tuple = attention_probs_dropout_prob a__ : List[Any] = type_sequence_label_size a__ : Any = initializer_range a__ : str = scope a__ : List[str] = frequency_stride a__ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 a__ : Tuple = frequency_out_dimension * time_out_dimension a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : List[str] ): a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a__ : List[Any] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = self.get_config() return config, input_values, labels def _UpperCamelCase( self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , 
lowerCamelCase__ : Optional[int] ): a__ : List[Any] = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : str ): a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ) : Optional[int] = config_and_inputs a__ : List[Any] = {"input_values": input_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowercase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _UpperCamelCase( self : str ): a__ : str = ASTModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : Optional[int] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : Tuple ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Optional[Any] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : Optional[int] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a__, a__ : List[str] = torchaudio.load(__a ) return audio, sampling_rate @require_torch @require_torchaudio class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : List[str] ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _UpperCamelCase( self : Optional[int] ): a__ : int = self.default_feature_extractor a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" 
).to(lowerCamelCase__ ) a__ : Any = self.default_feature_extractor a__, a__ : Dict = prepare_audio() a__ : str = audio.squeeze().numpy() a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
37
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase : List[Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A__ ( A__ ): """simple docstring""" _lowercase = 'mobilenet_v1' def __init__( self : Optional[int] , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : List[Any]=224 , lowerCamelCase__ : Optional[int]=1.0 , lowerCamelCase__ : Any=8 , lowerCamelCase__ : Optional[Any]="relu6" , lowerCamelCase__ : Any=True , lowerCamelCase__ : Any=0.999 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : List[Any]=0.001 , **lowerCamelCase__ : Union[str, Any] , ): super().__init__(**lowerCamelCase__ ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) a__ : Tuple = num_channels a__ : Union[str, Any] = image_size a__ : str = depth_multiplier a__ : int = min_depth a__ : Optional[Any] = hidden_act a__ : List[str] = tf_padding a__ : Any = classifier_dropout_prob a__ : List[Any] = initializer_range a__ : List[str] = layer_norm_eps class A__ ( A__ ): """simple docstring""" _lowercase = version.parse('1.11' ) @property def _UpperCamelCase( self : Tuple ): return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _UpperCamelCase( self : Optional[Any] ): if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _UpperCamelCase( self : int ): return 1E-4
37
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
37
1
import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class A__ : """simple docstring""" def __init__( self : str , lowerCamelCase__ : str , lowerCamelCase__ : int=13 , lowerCamelCase__ : str=7 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Any=True , lowerCamelCase__ : Optional[int]=99 , lowerCamelCase__ : Optional[Any]=64 , lowerCamelCase__ : Any=5 , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : Dict=64 , lowerCamelCase__ : int="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Dict=512 , lowerCamelCase__ : int=16 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Union[str, Any]=0.02 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : List[str]=4 , lowerCamelCase__ : int=None , ): a__ : Optional[Any] = parent a__ : List[str] = batch_size a__ : Tuple = seq_length a__ : Dict = is_training a__ : str = use_input_mask a__ : int = use_token_type_ids a__ : Any = use_labels a__ : List[Any] = vocab_size a__ : Optional[Any] = hidden_size a__ : int = num_hidden_layers a__ : Optional[Any] = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : Tuple = hidden_act a__ : Dict = hidden_dropout_prob a__ : List[str] = attention_probs_dropout_prob a__ : Any = max_position_embeddings a__ : int = type_vocab_size a__ : List[Any] = type_sequence_label_size a__ : Tuple = initializer_range a__ : Any = num_labels a__ : Optional[int] = num_choices a__ : Union[str, Any] = scope def _UpperCamelCase( self : Tuple ): return MPNetConfig.from_pretrained("microsoft/mpnet-base" ) def _UpperCamelCase( self : List[Any] ): a__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : Union[str, Any] = None if self.use_input_mask: a__ : str = random_attention_mask([self.batch_size, self.seq_length] ) a__ : Optional[Any] = None a__ : List[Any] = None a__ : str = None if self.use_labels: a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) a__ : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase( self : Optional[int] ): return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : str ): a__ : Union[str, Any] = 
MPNetModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : int = model(lowerCamelCase__ , lowerCamelCase__ ) a__ : str = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _UpperCamelCase( self : int , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] ): a__ : Union[str, Any] = MPNetForQuestionAnswering(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[int] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ): a__ : Union[str, Any] = self.num_labels a__ : Tuple = MPNetForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple ): a__ : str = self.num_choices a__ : List[Any] = MPNetForMultipleChoice(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a__ : List[Any] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] ): a__ : Any = self.num_labels a__ : Any = MPNetForTokenClassification(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() ((a__), (a__), (a__), (a__), (a__), (a__)) : Optional[Any] = config_and_inputs a__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': 
MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) _lowercase = False _lowercase = True def _UpperCamelCase( self : Dict ): a__ : str = MPNetModelTester(self ) a__ : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : Tuple ): self.config_tester.run_common_tests() def _UpperCamelCase( self : List[str] ): a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : str ): a__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*lowerCamelCase__ ) @require_torch class A__ ( unittest.TestCase ): """simple docstring""" @slow def _UpperCamelCase( self : int ): a__ : Dict = MPNetModel.from_pretrained("microsoft/mpnet-base" ) a__ : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) a__ : List[str] = model(lowerCamelCase__ )[0] a__ : Any = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , lowerCamelCase__ ) a__ : Tuple = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
37
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : List[str] = logging.get_logger(__name__) class A__ ( A__ ): """simple docstring""" _lowercase = 'timm_backbone' def __init__( self : Any , lowerCamelCase__ : str=None , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=None , **lowerCamelCase__ : List[str] , ): super().__init__(**lowerCamelCase__ ) a__ : Any = backbone a__ : Any = num_channels a__ : Union[str, Any] = features_only a__ : List[str] = use_pretrained_backbone a__ : Optional[Any] = True a__ : Optional[int] = out_indices if out_indices is not None else (-1,)
37
def binomial_coefficient(n: int, r: int) -> int:
    # c[j] holds C(i, j) for the current row i of Pascal's triangle
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
37
1
import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : str = { """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""", } class A__ ( A__ ): """simple docstring""" _lowercase = 'align_text_model' def __init__( self : Dict , lowerCamelCase__ : Dict=30_522 , lowerCamelCase__ : Optional[int]=768 , lowerCamelCase__ : Tuple=12 , lowerCamelCase__ : Tuple=12 , lowerCamelCase__ : List[str]=3_072 , lowerCamelCase__ : Union[str, Any]="gelu" , lowerCamelCase__ : str=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Any=512 , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : Optional[int]=0.02 , lowerCamelCase__ : Optional[Any]=1E-12 , lowerCamelCase__ : Dict=0 , lowerCamelCase__ : Optional[int]="absolute" , lowerCamelCase__ : List[Any]=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__(**lowerCamelCase__ ) a__ : Optional[Any] = vocab_size a__ : Optional[int] = hidden_size a__ : str = num_hidden_layers a__ : int = num_attention_heads a__ : Any = hidden_act a__ : str = intermediate_size a__ : List[Any] = hidden_dropout_prob a__ : str = attention_probs_dropout_prob a__ : List[Any] = max_position_embeddings a__ : List[Any] = type_vocab_size a__ : Optional[int] = initializer_range a__ : int = layer_norm_eps a__ : Optional[Any] = position_embedding_type a__ : Dict = use_cache a__ : Optional[int] = pad_token_id @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : Union[str, os.PathLike] , **lowerCamelCase__ : List[str] ): cls._set_token_in_kwargs(lowerCamelCase__ ) a__, a__ : Dict = cls.get_config_dict(lowerCamelCase__ , **lowerCamelCase__ ) # get the text config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": a__ : int = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase__ , **lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" _lowercase = 'align_vision_model' def __init__( self : List[Any] , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 600 , lowerCamelCase__ : float = 2.0 , lowerCamelCase__ : float = 3.1 , lowerCamelCase__ : int = 8 , lowerCamelCase__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , lowerCamelCase__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , lowerCamelCase__ : List[int] = [] , lowerCamelCase__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase__ : float = 0.25 , lowerCamelCase__ : str = "swish" , lowerCamelCase__ : int = 2_560 , lowerCamelCase__ : str = "mean" , lowerCamelCase__ : float = 0.02 , lowerCamelCase__ : float = 0.001 , lowerCamelCase__ : float = 0.99 , lowerCamelCase__ : float = 0.2 , **lowerCamelCase__ : List[str] , ): super().__init__(**lowerCamelCase__ ) a__ : Union[str, Any] = num_channels a__ : List[Any] = image_size a__ : int = width_coefficient a__ : int = depth_coefficient a__ : Union[str, Any] = depth_divisor a__ : Optional[Any] = kernel_sizes a__ : Union[str, Any] = in_channels a__ : Optional[Any] = out_channels a__ : Any = depthwise_padding a__ : Any = strides a__ : int = num_block_repeats a__ : List[Any] = expand_ratios a__ : int = squeeze_expansion_ratio a__ : Optional[Any] = hidden_act a__ : List[str] = hidden_dim a__ : List[str] = pooling_type a__ : List[str] = initializer_range a__ : Union[str, Any] = batch_norm_eps a__ : int = batch_norm_momentum a__ : List[str] = drop_connect_rate a__ : Dict = sum(lowerCamelCase__ ) * 4 @classmethod def _UpperCamelCase( cls : Dict , lowerCamelCase__ : Union[str, os.PathLike] , **lowerCamelCase__ : Dict ): cls._set_token_in_kwargs(lowerCamelCase__ ) a__, a__ : Union[str, Any] = cls.get_config_dict(lowerCamelCase__ , **lowerCamelCase__ ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("model_type" ) == "align": a__ : List[str] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCamelCase__ , **lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" _lowercase = 'align' _lowercase = True def __init__( self : str , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Dict=640 , lowerCamelCase__ : Tuple=1.0 , lowerCamelCase__ : Any=0.02 , **lowerCamelCase__ : Optional[int] , ): super().__init__(**lowerCamelCase__ ) if text_config is None: a__ : Optional[int] = {} logger.info("text_config is None. Initializing the AlignTextConfig with default values." ) if vision_config is None: a__ : Union[str, Any] = {} logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." 
) a__ : Optional[Any] = AlignTextConfig(**lowerCamelCase__ ) a__ : str = AlignVisionConfig(**lowerCamelCase__ ) a__ : Dict = projection_dim a__ : List[str] = temperature_init_value a__ : Dict = initializer_range @classmethod def _UpperCamelCase( cls : Tuple , lowerCamelCase__ : AlignTextConfig , lowerCamelCase__ : AlignVisionConfig , **lowerCamelCase__ : List[str] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCamelCase__ ) def _UpperCamelCase( self : Any ): a__ : int = copy.deepcopy(self.__dict__ ) a__ : int = self.text_config.to_dict() a__ : Tuple = self.vision_config.to_dict() a__ : Optional[Any] = self.__class__.model_type return output
37
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
1
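The _pad override in the LED tokenizer snippet above extends global_attention_mask to the padded sequence length with -1, because 0 already means "local attention" for real tokens. A minimal, standalone illustration of that padding rule in plain Python (no tokenizer objects; the variable names are illustrative):

# a short global attention mask: global attention on the first token only
global_attention_mask = [1, 0, 0]
padded_length = 5
difference = padded_length - len(global_attention_mask)

# right padding, as in the padding_side == "right" branch above
right_padded = global_attention_mask + [-1] * difference
# left padding, as in the padding_side == "left" branch above
left_padded = [-1] * difference + global_attention_mask

print(right_padded)  # [1, 0, 0, -1, -1]
print(left_padded)   # [-1, -1, 1, 0, 0]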
def UpperCamelCase_ ( __a ) -> str: stooge(__a , 0 , len(__a ) - 1 ) return arr def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: if i >= h: return # If the first element is larger than the last, swap them if arr[i] > arr[h]: a__, a__ : Optional[int] = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: a__ : List[Any] = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(__a , __a , (h - t) ) # Recursively sort last 2/3 elements stooge(__a , i + t , (__a) ) # Recursively sort first 2/3 elements stooge(__a , __a , (h - t) ) if __name__ == "__main__": UpperCamelCase : List[Any] = input("""Enter numbers separated by a comma:\n""").strip() UpperCamelCase : str = [int(item) for item in user_input.split(""",""")] print(stooge_sort(unsorted))
37
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
37
1
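A minimal, runnable sketch of the stooge sort snippet above, with plain names restored (the identifiers are illustrative): sort the first two thirds, then the last two thirds, then the first two thirds again.

def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr

def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # swap the endpoints if they are out of order
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # with 3 or more elements, recurse on overlapping two-thirds segments
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge(arr, i, h - t)   # first 2/3
        stooge(arr, i + t, h)   # last 2/3
        stooge(arr, i, h - t)   # first 2/3 again

print(stooge_sort([8, 3, 5, 1, 9]))  # [1, 3, 5, 8, 9]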
UpperCamelCase : int = [ """DownloadConfig""", """DownloadManager""", """DownloadMode""", """StreamingDownloadManager""", ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
37
from statistics import mean, stdev def UpperCamelCase_ ( __a , __a = 3 ) -> list: a__ : List[str] = min(__a ) a__ : str = max(__a ) # normalize data return [round((x - x_min) / (x_max - x_min) , __a ) for x in data] def UpperCamelCase_ ( __a , __a = 3 ) -> list: a__ : str = mean(__a ) a__ : List[str] = stdev(__a ) # standardize data return [round((x - mu) / (sigma) , __a ) for x in data]
37
1
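The two rescaling helpers in the snippet above implement min-max normalization and z-score standardization. A self-contained sketch with plain names (illustrative) and a small worked example:

from statistics import mean, stdev

def normalization(data: list, ndigits: int = 3) -> list:
    # rescale every value into [0, 1] using the observed minimum and maximum
    x_min, x_max = min(data), max(data)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]

def standardization(data: list, ndigits: int = 3) -> list:
    # rescale to zero mean and unit sample standard deviation
    mu, sigma = mean(data), stdev(data)
    return [round((x - mu) / sigma, ndigits) for x in data]

print(normalization([2, 4, 6, 8, 10]))    # [0.0, 0.25, 0.5, 0.75, 1.0]
print(standardization([2, 4, 6, 8, 10]))  # [-1.265, -0.632, 0.0, 0.632, 1.265]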
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
def UpperCamelCase_ ( __a = 50 ) -> int: a__ : Tuple = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f"""{solution() = }""")
37
1
from math import factorial, pi def UpperCamelCase_ ( __a , __a = 30 ) -> float: if not isinstance(__a , (int, float) ): raise ValueError("maclaurin_sin() requires either an int or float for theta" ) if not isinstance(__a , __a ) or accuracy <= 0: raise ValueError("maclaurin_sin() requires a positive int for accuracy" ) a__ : Optional[Any] = float(__a ) a__ : Any = theta // (2 * pi) theta -= 2 * div * pi return sum( (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__a ) ) def UpperCamelCase_ ( __a , __a = 30 ) -> float: if not isinstance(__a , (int, float) ): raise ValueError("maclaurin_cos() requires either an int or float for theta" ) if not isinstance(__a , __a ) or accuracy <= 0: raise ValueError("maclaurin_cos() requires a positive int for accuracy" ) a__ : Union[str, Any] = float(__a ) a__ : Dict = theta // (2 * pi) theta -= 2 * div * pi return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__a ) ) if __name__ == "__main__": import doctest doctest.testmod() print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) print(maclaurin_sin(-10, 15)) print(maclaurin_cos(5)) print(maclaurin_cos(-5)) print(maclaurin_cos(10, 15)) print(maclaurin_cos(-10, 15))
37
class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] ): a__ : str = name a__ : Optional[int] = value a__ : Dict = weight def __repr__( self : Union[str, Any] ): return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def _UpperCamelCase( self : Dict ): return self.value def _UpperCamelCase( self : Optional[Any] ): return self.name def _UpperCamelCase( self : Optional[Any] ): return self.weight def _UpperCamelCase( self : Optional[int] ): return self.value / self.weight def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = [] for i in range(len(__a ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def UpperCamelCase_ ( __a , __a , __a ) -> Union[str, Any]: a__ : List[str] = sorted(__a , key=__a , reverse=__a ) a__ : List[Any] = [] a__, a__ : Union[str, Any] = 0.0, 0.0 for i in range(len(__a ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def UpperCamelCase_ ( ) -> Union[str, Any]: pass if __name__ == "__main__": import doctest doctest.testmod()
37
1
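As a sanity check on the Maclaurin-series snippet above: after reducing the angle modulo 2*pi, the truncated series sin(x) = sum over r of (-1)^r * x^(2r+1) / (2r+1)! converges to the library value. A minimal sketch with plain names (illustrative):

from math import factorial, pi, sin

def maclaurin_sine(theta: float, accuracy: int = 30) -> float:
    # reduce theta into [0, 2*pi) so the truncated series converges quickly
    theta = float(theta)
    theta -= 2 * (theta // (2 * pi)) * pi
    # sum the first `accuracy` terms of the Maclaurin series for sine
    return sum((-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))

print(maclaurin_sine(10), sin(10))  # the two values agree to many decimal places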
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
37
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ): super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ ) a__ : str = Sql( cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = None a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , ) # Build dataset for splits a__ : List[str] = self.builder.as_dataset( split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) a__ : Any = dataset a__ : str = name a__ : Tuple = con a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a__ : Any = num_proc a__ : Tuple = to_sql_kwargs def _UpperCamelCase( self : List[Any] ): a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ ) a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs ) return written def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): a__, a__, a__ : Union[str, Any] = args a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs a__ : Tuple = query_table( table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) a__ : str = batch.to_pandas() a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ ) return num_rows or len(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a__, a__ : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, 
index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
37
1
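The SqlDatasetWriter in the snippet above streams a Dataset to a SQL table through pandas DataFrame.to_sql, one batch at a time. A hedged usage sketch, assuming a recent version of the datasets library where Dataset.to_sql (which delegates to this writer) is available; the table and file names are illustrative:

from datasets import Dataset

# build a tiny in-memory dataset and write it to a local SQLite database
ds = Dataset.from_dict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
written = ds.to_sql("my_table", "sqlite:///example.db", batch_size=2)
print(written)  # number of rows written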
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase : Optional[int] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = ["""NllbTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = ["""NllbTokenizerFast"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
37
import math from datetime import datetime, timedelta def UpperCamelCase_ ( __a ) -> datetime: a__ : Union[str, Any] = year % 19 a__ : List[str] = year % 4 a__ : str = year % 7 a__ : Any = math.floor(year / 100 ) a__ : List[str] = math.floor((13 + 8 * leap_day_inhibits) / 25 ) a__ : Optional[int] = leap_day_inhibits / 4 a__ : Union[str, Any] = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 a__ : Dict = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 a__ : Any = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon a__ : List[Any] = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(__a , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(__a , 4 , 18 ) else: return datetime(__a , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1994, 2000, 2010, 2021, 2023): UpperCamelCase : Tuple = """will be""" if year > datetime.now().year else """was""" print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
37
1
def UpperCamelCase_ ( __a ) -> bool: return str(__a ) == str(__a )[::-1] def UpperCamelCase_ ( __a ) -> int: return int(__a ) + int(str(__a )[::-1] ) def UpperCamelCase_ ( __a = 10_000 ) -> int: a__ : Optional[Any] = [] for num in range(1 , __a ): a__ : Optional[Any] = 0 a__ : List[Any] = num while iterations < 50: a__ : Tuple = sum_reverse(__a ) iterations += 1 if is_palindrome(__a ): break else: lychrel_nums.append(__a ) return len(__a ) if __name__ == "__main__": print(f"""{solution() = }""")
37
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ): super().__init__() a__ : int = module a__ : Any = nn.Sequential( nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , ) a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ): return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'bigscience/bloom-1b7' # Constant values _lowercase = 2.1_09_65_95_52_69_25_74 _lowercase = 'Hello my name is' _lowercase = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _lowercase = 1_0 def _UpperCamelCase( self : Dict ): # Models and tokenizer a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): super().setUp() # Models and tokenizer a__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : List[Any] ): a__ : str = self.model_abit.config self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) ) a__ : Optional[Any] = config.to_dict() a__ : int = config.to_diff_dict() a__ : List[str] = config.to_json_string() def _UpperCamelCase( self : int ): from bitsandbytes.nn import Paramsabit a__ : List[Any] = self.model_fpaa.get_memory_footprint() a__ : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) a__ : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCamelCase( self : Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCamelCase( self : str ): a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = BitsAndBytesConfig() a__ : Optional[int] = True a__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" ) a__ : str = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : int = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : Dict ): with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = BitsAndBytesConfig() with self.assertRaises(lowerCamelCase__ ): a__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _UpperCamelCase( self : int ): with self.assertRaises(lowerCamelCase__ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : int = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Any = self.model_fpaa.to(torch.floataa ) a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to("cpu" ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.half() # Check this does not throw an error a__ : Dict = self.model_fpaa.float() def _UpperCamelCase( self : Dict ): a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCamelCase( cls : str ): a__ : Dict = "t5-small" a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense a__ : int = AutoTokenizer.from_pretrained(cls.model_name ) a__ : str = "Translate in German: Hello, my dog is cute" def _UpperCamelCase( self : Optional[int] ): gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Optional[int] ): from transformers import TaForConditionalGeneration a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Optional[Any] = None # test with `t5-small` a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Any = model.generate(**lowerCamelCase__ ) a__ : Union[str, Any] = modules def _UpperCamelCase( self : List[Any] ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Optional[int] = model.generate(**lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : List[str] ): super().setUp() # model_name a__ : Union[str, Any] = "bigscience/bloom-560m" a__ : Union[str, Any] = "t5-small" # Different types of model a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Sequence classification model a__ : Dict = AutoModelForSequenceClassification.from_pretrained( 
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # CausalLM model a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Seq2seq model a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): super().setUp() def _UpperCamelCase( self : int ): del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Tuple ): super().setUp() def _UpperCamelCase( self : List[Any] ): a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): a__ : Any = "facebook/opt-350m" super().setUp() def _UpperCamelCase( self : int ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability a__ : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCamelCase__ ) ): a__ : Dict = LoRALayer(module.q_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.k_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : Optional[Any] = model.forward(**lowerCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt2-xl' _lowercase = 3.31_91_85_48_54_15_21_87
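The training test above wraps the attention projections in a `LoRALayer` adapter that is defined outside this excerpt. The sketch below is a minimal, hypothetical version of such an adapter; the rank-16 two-layer structure and zero-initialized up-projection are assumptions inferred from how the test indexes `module.adapter[1]`, not the repository's actual helper.

import torch
import torch.nn as nn


class LoRALayer(nn.Module):
    """Hypothetical low-rank adapter: frozen base linear plus a trainable rank-r update."""

    def __init__(self, module: nn.Linear, rank: int):
        super().__init__()
        self.module = module  # the frozen projection being adapted (q_proj / k_proj / v_proj)
        # Two-layer adapter so it can be indexed as adapter[0] / adapter[1], as the test does.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        nn.init.normal_(self.adapter[0].weight, std=1.0 / rank)
        nn.init.zeros_(self.adapter[1].weight)  # start as a no-op so outputs match the base model

    def forward(self, x):
        return self.module(x) + self.adapter(x)


# Tiny smoke test: wrap a projection and check that shapes are preserved.
base = nn.Linear(32, 32)
wrapped = LoRALayer(base, rank=16)
print(wrapped(torch.randn(2, 32)).shape)  # torch.Size([2, 32])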
37
1
from ..utils import DummyObject, requires_backends


class A__ ( metaclass=A__ ):
    """simple docstring"""

    _lowercase = ['speech']

    def __init__( self : List[Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : List[str] ):
        requires_backends(self , ["speech"] )


class A__ ( metaclass=A__ ):
    """simple docstring"""

    _lowercase = ['speech']

    def __init__( self : Dict , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : List[Any] ):
        requires_backends(self , ["speech"] )
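For context, the two placeholder classes above exist only so that imports succeed when the optional `speech` backend is missing; instantiating one raises an informative ImportError. The short sketch below reproduces that behaviour with `requires_backends` directly; the class name is made up for illustration, and the exact error text depends on the installed transformers version.

from transformers.utils import requires_backends


class NeedsSpeech:
    """Stand-in for a speech-only class; fails fast when the 'speech' extra is absent."""

    def __init__(self):
        requires_backends(self, ["speech"])


try:
    NeedsSpeech()
except ImportError as err:  # raised only if the speech dependencies are not installed
    print(err)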
37
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
37
1
def UpperCamelCase_ ( __a , __a , __a=False ) -> Optional[int]:
    if isinstance(__a , __a ) and isinstance(__a , __a ):
        a__ : Union[str, Any] = len(set_a.intersection(__a ) )

        if alternative_union:
            a__ : List[Any] = len(__a ) + len(__a )
        else:
            a__ : List[str] = len(set_a.union(__a ) )
        return intersection / union

    if isinstance(__a , (list, tuple) ) and isinstance(__a , (list, tuple) ):
        a__ : Tuple = [element for element in set_a if element in set_b]

        if alternative_union:
            a__ : Tuple = len(__a ) + len(__a )
            return len(__a ) / union
        else:
            a__ : List[Any] = set_a + [element for element in set_b if element not in set_a]
            return len(__a ) / len(__a )

        return len(__a ) / len(__a )
    return None


if __name__ == "__main__":
    UpperCamelCase : int = {"""a""", """b""", """c""", """d""", """e"""}
    UpperCamelCase : Union[str, Any] = {"""c""", """d""", """e""", """f""", """h""", """i"""}
    print(jaccard_similarity(set_a, set_b))
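As a quick sanity check of the function above, here is a readable restatement of its set-only path, using the two example sets from its __main__ block; the helper name and the simplification to sets are mine and not part of the row above.

def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Set-only restatement of the obfuscated function above, for illustration."""
    intersection = len(set_a.intersection(set_b))
    if alternative_union:
        union = len(set_a) + len(set_b)  # |A| + |B| instead of |A union B|
    else:
        union = len(set_a.union(set_b))
    return intersection / union


set_a = {"a", "b", "c", "d", "e"}
set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))                          # 3 / 8 = 0.375
print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 ~ 0.27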
37
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


UpperCamelCase : Dict = logging.get_logger(__name__)


def UpperCamelCase_ ( __a ) -> Union[str, Any]:
    a__ : Tuple = R"\w+[.]\d+"
    a__ : List[Any] = re.findall(__a , __a )
    for pat in pats:
        a__ : Union[str, Any] = key.replace(__a , "_".join(pat.split("." ) ) )
    return key


def UpperCamelCase_ ( __a , __a , __a ) -> List[str]:
    a__ : List[str] = pt_tuple_key[:-1] + ("scale",)

    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        a__ : Any = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        a__ : Optional[Any] = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        a__ : Union[str, Any] = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    a__ : List[str] = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        a__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    a__ : Tuple = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        a__ : Tuple = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    a__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    a__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def UpperCamelCase_ ( __a , __a , __a=42 ) -> str:
    # Step 1: Convert pytorch tensor to numpy
    a__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    a__ : Tuple = flax_model.init_weights(PRNGKey(__a ) )

    a__ : Optional[Any] = flatten_dict(__a )
    a__ : Union[str, Any] = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        a__ : Optional[int] = rename_key(__a )
        a__ : Optional[int] = tuple(renamed_pt_key.split("." ) )

        # Correctly rename weight parameters
        a__, a__ : Union[str, Any] = rename_key_and_reshape_tensor(__a , __a , __a )

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.'''
                )

        # also add unexpected weight so that warning is thrown
        a__ : str = jnp.asarray(__a )

    return unflatten_dict(__a )
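To make the first helper above concrete, here is a readable restatement of the key-renaming step with a sample key (the sample key itself is invented for illustration): every `name.<digit>` segment becomes `name_<digit>`, so PyTorch module paths line up with flattened Flax parameter dictionaries.

import re


def rename_key(key):
    """Readable restatement of the first helper above."""
    pats = re.findall(r"\w+[.]\d+", key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


print(rename_key("down_blocks.1.attentions.0.proj.weight"))
# -> down_blocks_1.attentions_0.proj.weight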
37
1
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin UpperCamelCase : int = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : Dict=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : List[Any]=14 , lowerCamelCase__ : Optional[int]=10 , lowerCamelCase__ : int=19 , lowerCamelCase__ : List[Any]=5 , lowerCamelCase__ : int=4 , lowerCamelCase__ : str=True , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : int=4 , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : Dict="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Any=[1, 2, 3, 4, 5] , lowerCamelCase__ : List[Any]=25 , lowerCamelCase__ : List[Any]=5 , ): a__ : Optional[Any] = d_model a__ : List[str] = parent a__ : str = batch_size a__ : List[Any] = prediction_length a__ : List[Any] = context_length a__ : str = cardinality a__ : List[Any] = num_time_features a__ : Dict = lags_sequence a__ : int = embedding_dimension a__ : Dict = is_training a__ : Dict = hidden_size a__ : int = num_hidden_layers a__ : str = num_attention_heads a__ : Dict = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Optional[Any] = attention_probs_dropout_prob a__ : Optional[int] = context_length a__ : Any = prediction_length + label_length a__ : List[Any] = label_length a__ : int = moving_average a__ : Any = autocorrelation_factor def _UpperCamelCase( self : Dict ): return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def _UpperCamelCase( self : int , lowerCamelCase__ : List[Any] ): a__ : str = config.context_length + max(config.lags_sequence ) a__ : Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) a__ : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) a__ : Optional[int] = floats_tensor([self.batch_size, _past_length] ) a__ : Any = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs a__ : Optional[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) a__ : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] ) a__ : str = { "past_values": 
past_values, "static_categorical_features": static_categorical_features, "past_time_features": past_time_features, "past_observed_mask": past_observed_mask, "future_time_features": future_time_features, "future_values": future_values, } return inputs_dict def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.get_config() a__ : Any = self.prepare_autoformer_inputs_dict(lowerCamelCase__ ) return config, inputs_dict def _UpperCamelCase( self : str ): a__, a__ : Tuple = self.prepare_config_and_inputs() return config, inputs_dict def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] ): a__ : Union[str, Any] = AutoformerModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval() a__ : List[Any] = model(**lowerCamelCase__ ) a__ : int = outputs.encoder_last_hidden_state a__ : Optional[Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: a__ : str = model.get_encoder() encoder.save_pretrained(lowerCamelCase__ ) a__ : List[str] = AutoformerEncoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ ) a__, a__, a__, a__, a__ : List[str] = model.create_network_inputs(**lowerCamelCase__ ) a__, a__ : Union[str, Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) a__ : str = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) a__ : Optional[int] = encoder(inputs_embeds=lowerCamelCase__ )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) a__ : Tuple = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) a__ : Tuple = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) a__ : Dict = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) a__ : Union[str, Any] = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: a__ : Any = model.get_decoder() decoder.save_pretrained(lowerCamelCase__ ) a__ : Dict = AutoformerDecoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ ) a__ : Optional[int] = decoder( trend=lowerCamelCase__ , inputs_embeds=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _lowercase = (AutoformerForPrediction,) if is_torch_available() else () _lowercase = {'feature-extraction': AutoformerModel} if is_torch_available() else {} _lowercase = False _lowercase = False _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Optional[Any] ): a__ : List[Any] = AutoformerModelTester(self ) a__ : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): self.config_tester.run_common_tests() def _UpperCamelCase( self : List[Any] ): a__, a__ : int = self.model_tester.prepare_config_and_inputs() for model_class in 
self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase__ ) a__, a__ : List[str] = model_class.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ ) self.assertEqual(info["missing_keys"] , [] ) def _UpperCamelCase( self : Union[str, Any] ): a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase__ ) @unittest.skip(reason="Model has no tokens embeddings" ) def _UpperCamelCase( self : Optional[Any] ): pass def _UpperCamelCase( self : Any ): a__ : str = inspect.signature(getattr(lowerCamelCase__ , "forward" ) ) # The main input is the name of the argument after `self` a__ : int = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[Any] = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Tuple = [*signature.parameters.keys()] a__ : Any = [ "past_values", "past_time_features", "past_observed_mask", "static_categorical_features", "static_real_features", "future_values", "future_time_features", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("future_observed_mask" ) expected_arg_names.extend( [ "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "output_hidden_states", "output_attentions", "use_cache", "return_dict", ] ) self.assertListEqual(arg_names[: len(lowerCamelCase__ )] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__, a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() a__ : Any = True a__ : int = getattr(self.model_tester , "seq_length" , lowerCamelCase__ ) a__ : Optional[Any] = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase__ ) a__ : Optional[int] = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase__ ) a__ : Optional[int] = getattr(self.model_tester , "d_model" , lowerCamelCase__ ) a__ : Tuple = getattr(self.model_tester , "num_attention_heads" , lowerCamelCase__ ) a__ : Union[str, Any] = d_model // num_attention_heads for model_class in self.all_model_classes: a__ : Tuple = True a__ : List[Any] = False a__ : Optional[Any] = True a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): a__ : str = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) a__ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] a__ : List[str] = True a__ : Any = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): a__ : str = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) a__ : str = outputs.encoder_attentions self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) a__ : 
int = len(lowerCamelCase__ ) a__ : Tuple = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) # decoder attentions a__ : List[Any] = outputs.decoder_attentions self.assertIsInstance(lowerCamelCase__ , (list, tuple) ) self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions a__ : Optional[int] = outputs.cross_attentions self.assertIsInstance(lowerCamelCase__ , (list, tuple) ) self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine a__ : int = True a__ : List[str] = True a__ : Union[str, Any] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): a__ : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) self.assertEqual(out_len + 2 , len(lowerCamelCase__ ) ) a__ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def _UpperCamelCase( self : Any ): super().test_retain_grad_hidden_states_attentions() def UpperCamelCase_ ( __a="train-batch.pt" ) -> Optional[Any]: a__ : Union[str, Any] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__a , repo_type="dataset" ) a__ : Tuple = torch.load(__a , map_location=__a ) return batch @require_torch @slow class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ ) a__ : Optional[Any] = prepare_batch() with torch.no_grad(): a__ : int = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0] a__ : Union[str, Any] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=lowerCamelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict ): a__ : Optional[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ ) a__ : int = prepare_batch("val-batch.pt" ) with torch.no_grad(): a__ : List[Any] = model( past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , 
).encoder_last_hidden_state a__ : Any = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , lowerCamelCase__ ) a__ : Tuple = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=lowerCamelCase__ ) self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict ): a__ : Optional[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ ) a__ : int = prepare_batch("val-batch.pt" ) with torch.no_grad(): a__ : List[str] = model.generate( static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , ) a__ : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , lowerCamelCase__ ) a__ : Tuple = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=lowerCamelCase__ ) a__ : Tuple = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCamelCase__ , rtol=1E-1 ) )
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def UpperCamelCase_ ( ) -> int:
    a__ : Any = HfArgumentParser(__a )
    a__ : Any = parser.parse_args_into_dataclasses()[0]
    a__ : Optional[int] = TensorFlowBenchmark(args=__a )
    try:
        a__ : Optional[int] = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        a__ : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        a__ : List[Any] = " ".join(str(__a ).split(" " )[:-1] )
        a__ : str = ""
        a__ : List[Any] = eval(str(__a ).split(" " )[-1] )
        a__ : List[str] = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(__a )
        if len(__a ) > 0:
            a__ : Tuple = full_error_msg + begin_error_msg + str(__a )
        raise ValueError(__a )
    benchmark.run()


if __name__ == "__main__":
    main()
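The script above is a thin CLI wrapper; the same benchmark can also be driven from Python. The sketch below assumes the standard benchmark argument names (`models`, `batch_sizes`, `sequence_lengths`); treat them as assumptions and adjust if your transformers version exposes different fields.

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

# Assumed field names; they mirror the common BenchmarkArguments interface.
args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[128],
)
results = TensorFlowBenchmark(args=args).run()
print(results)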
37
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : str=3 , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Tuple=[10, 20, 30, 40] , lowerCamelCase__ : Tuple=[1, 1, 2, 1] , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Tuple="relu" , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : int=None , ): a__ : Optional[Any] = parent a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Dict = num_channels a__ : Any = embeddings_size a__ : int = hidden_sizes a__ : Optional[int] = depths a__ : List[str] = is_training a__ : Dict = use_labels a__ : int = hidden_act a__ : Tuple = num_labels a__ : Tuple = scope a__ : str = len(lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Union[str, Any] = None if self.use_labels: a__ : Dict = ids_tensor([self.batch_size] , self.num_labels ) a__ : Any = self.get_config() return config, pixel_values, labels def _UpperCamelCase( self : Optional[Any] ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def _UpperCamelCase( self : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any ): a__ : str = TFResNetModel(config=lowerCamelCase__ ) a__ : Union[str, Any] = model(lowerCamelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _UpperCamelCase( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ): a__ : List[str] = self.num_labels a__ : Any = TFResNetForImageClassification(lowerCamelCase__ ) a__ : Dict = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCamelCase( self : Optional[Any] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : int = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _lowercase = ( {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification} if is_tf_available() else {} ) _lowercase = 
False _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : Tuple = TFResNetModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def _UpperCamelCase( self : int ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCamelCase( self : Any ): return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def _UpperCamelCase( self : Optional[Any] ): pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def _UpperCamelCase( self : Any ): pass def _UpperCamelCase( self : Union[str, Any] ): a__, a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Tuple = model_class(lowerCamelCase__ ) a__ : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Tuple = [*signature.parameters.keys()] a__ : str = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): def check_hidden_states_output(lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : str ): a__ : Optional[int] = model_class(lowerCamelCase__ ) a__ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) ) a__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a__ : str = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: a__ : Optional[int] = layer_type a__ : List[str] = True check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a__ : List[Any] = True check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : Dict ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : List[str] = TFResNetModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Tuple: a__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : int ): return ( 
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _UpperCamelCase( self : Dict ): a__ : List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) a__ : Union[str, Any] = self.default_image_processor a__ : Optional[Any] = prepare_img() a__ : Dict = image_processor(images=lowerCamelCase__ , return_tensors="tf" ) # forward pass a__ : Optional[int] = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : Union[str, Any] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase__ , atol=1E-4 ) )
37
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase : Optional[int] = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCamelCase_ ( __a ) -> Any: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCamelCase_ ( __a , __a , __a ) -> Any: return max(metric_fn(__a , __a ) for gt in ground_truths ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [] if args.gold_data_mode == "qa": a__ : Any = pd.read_csv(__a , sep="\t" , header=__a ) for answer_list in data[1]: a__ : Union[str, Any] = ast.literal_eval(__a ) answers.append(__a ) else: a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()] a__ : List[str] = [[reference] for reference in references] a__ : List[str] = 0 for prediction, ground_truths in zip(__a , __a ): total += 1 em += metric_max_over_ground_truths(__a , __a , __a ) fa += metric_max_over_ground_truths(__a , __a , __a ) a__ : Dict = 100.0 * em / total a__ : Optional[Any] = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = args.k a__ : str = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = 0 for hypo, reference in zip(__a , __a ): a__ : Any = set(hypo.split("\t" )[:k] ) a__ : Union[str, Any] = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a__ : Union[str, Any] = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: def strip_title(__a ): if title.startswith("\"" ): a__ : Optional[Any] = title[1:] if title.endswith("\"" ): a__ : Union[str, Any] = title[:-1] return title a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device ) a__ : Optional[int] = rag_model.rag.question_encoder(__a ) a__ : Union[str, Any] = question_enc_outputs[0] a__ : Optional[int] = rag_model.retriever( __a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a__ : int = [] for docs in all_docs: a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]] provenance_strings.append("\t".join(__a ) ) return provenance_strings def UpperCamelCase_ ( __a , __a , __a ) -> Dict: with torch.no_grad(): a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a ) a__ : Any = inputs_dict.input_ids.to(args.device ) a__ : Dict = inputs_dict.attention_mask.to(args.device ) a__ : Optional[int] = rag_model.generate( # 
rag_model overwrites generate __a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a ) if args.print_predictions: for q, a in zip(__a , __a ): logger.info("Q: {} - A: {}".format(__a , __a ) ) return answers def UpperCamelCase_ ( ) -> List[str]: a__ : int = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=__a , type=__a , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) a__ : int = parser.parse_args() a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Tuple = {} if args.model_type is None: a__ : List[str] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a__ : Tuple = args.n_docs if args.index_name is not None: a__ : Any = args.index_name if args.index_path is not None: a__ : int = args.index_path else: a__ : Optional[Any] = BartForConditionalGeneration a__ : Tuple = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , __a ) a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(__a , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(__a ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a__ : str = RagRetriever.from_pretrained(__a , **__a ) a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a ) model.retriever.init_retrieval() else: a__ : Dict = model_class.from_pretrained(__a , **__a ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a__ : List[Any] = [] for line in tqdm(__a ): questions.append(line.strip() ) if len(__a ) == args.eval_batch_size: a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) + "\n" ) preds_file.flush() a__ : Any = [] if len(__a ) > 0: a__ : List[str] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) ) preds_file.flush() score_fn(__a , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase : List[Any] = get_args() main(args)
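For reference, this is what a single line of the `--gold_data_path` file looks like in each of the two modes the script above accepts. The question and answers are invented examples, but the parsing mirrors what the script does: "qa" mode reads a tab-separated question plus a Python-literal answer list, while "ans" mode reads one answer string per line.

import ast

qa_mode_line = "who wrote hamlet\t['William Shakespeare', 'Shakespeare']"
ans_mode_line = "William Shakespeare"

# "qa" mode: tab-separated question and answer list, parsed with ast.literal_eval as above.
question, answer_list = qa_mode_line.split("\t")
answers = ast.literal_eval(answer_list)
print(question, answers)  # who wrote hamlet ['William Shakespeare', 'Shakespeare']

# "ans" mode: one gold answer string per line, wrapped into a single-element list by the script.
print([ans_mode_line])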
37
1
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


UpperCamelCase : str = {
    """susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
    """susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}


class A__ ( A__ ):
    """simple docstring"""

    _lowercase = 'ernie_m'
    _lowercase = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self : Tuple ,
        lowerCamelCase__ : int = 250_002 ,
        lowerCamelCase__ : int = 768 ,
        lowerCamelCase__ : int = 12 ,
        lowerCamelCase__ : int = 12 ,
        lowerCamelCase__ : int = 3_072 ,
        lowerCamelCase__ : str = "gelu" ,
        lowerCamelCase__ : float = 0.1 ,
        lowerCamelCase__ : float = 0.1 ,
        lowerCamelCase__ : int = 514 ,
        lowerCamelCase__ : float = 0.02 ,
        lowerCamelCase__ : int = 1 ,
        lowerCamelCase__ : float = 1E-05 ,
        lowerCamelCase__ : Dict=None ,
        lowerCamelCase__ : List[Any]=False ,
        lowerCamelCase__ : List[Any]=0.0 ,
        **lowerCamelCase__ : Any ,
    ):
        super().__init__(pad_token_id=lowerCamelCase__ , **lowerCamelCase__ )
        a__ : Any = vocab_size
        a__ : str = hidden_size
        a__ : Optional[Any] = num_hidden_layers
        a__ : Any = num_attention_heads
        a__ : Optional[int] = intermediate_size
        a__ : int = hidden_act
        a__ : str = hidden_dropout_prob
        a__ : List[Any] = attention_probs_dropout_prob
        a__ : Tuple = max_position_embeddings
        a__ : Dict = initializer_range
        a__ : Tuple = layer_norm_eps
        a__ : Union[str, Any] = classifier_dropout
        a__ : Any = is_decoder
        a__ : Any = act_dropout
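A minimal usage sketch for the configuration above: the parameter names are obfuscated in this row, but the released ErnieM configuration exposes them as `vocab_size`, `hidden_size`, `num_hidden_layers`, and so on; treat the exact names as an assumption if your transformers version predates ErnieM support.

from transformers import ErnieMConfig

# Defaults roughly match the signature above (250_002 vocab, 768 hidden, 12 layers / heads).
config = ErnieMConfig()
small = ErnieMConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6)
print(config.hidden_size, small.hidden_size)  # 768 384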
37
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
37
1
def UpperCamelCase_ ( __a ) -> float:
    a__ : Any = 0
    while len(__a ) > 1:
        a__ : int = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            a__ : str = files.index(min(__a ) )
            temp += files[min_index]
            files.pop(__a )
        files.append(__a )
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase : Union[str, Any] = None def UpperCamelCase_ ( ) -> List[str]: a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Dict = bool(qa["answers"]["text"] ) return qid_to_has_ans def UpperCamelCase_ ( __a ) -> List[Any]: def remove_articles(__a ): return ARTICLES_REGEX.sub(" " , __a ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): a__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) ) def UpperCamelCase_ ( __a ) -> Dict: if not s: return [] return normalize_answer(__a ).split() def UpperCamelCase_ ( __a , __a ) -> str: return int(normalize_answer(__a ) == normalize_answer(__a ) ) def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : int = get_tokens(__a ) a__ : Optional[Any] = get_tokens(__a ) a__ : Any = collections.Counter(__a ) & collections.Counter(__a ) a__ : Dict = sum(common.values() ) if len(__a ) == 0 or len(__a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : Tuple = 1.0 * num_same / len(__a ) a__ : str = 1.0 * num_same / len(__a ) a__ : str = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase_ ( __a , __a ) -> int: a__ : List[str] = {} a__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : List[Any] = qa["id"] a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : Tuple = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue a__ : Tuple = preds[qid] # Take max over all gold answers a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers ) a__ : str = max(compute_fa(__a , __a ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: a__ : Optional[Any] = {} for qid, s in scores.items(): a__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: a__ : Dict = float(not qid_to_has_ans[qid] ) else: a__ : Optional[Any] = s return new_scores def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple: if not 
qid_list: a__ : Union[str, Any] = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: a__ : int = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: for k in new_eval: a__ : Optional[Any] = new_eval[k] def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: plt.step(__a , __a , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__a ) plt.savefig(__a ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict: a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) a__ : Any = 0.0 a__ : Optional[int] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Tuple = [0.0] a__ : List[str] = 0.0 for i, qid in enumerate(__a ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Any = true_pos / float(i + 1 ) a__ : int = true_pos / float(__a ) if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__a ) recalls.append(__a ) if out_image: plot_pr_curve(__a , __a , __a , __a ) return {"ap": 100.0 * avg_prec} def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str: if out_image_dir and not os.path.exists(__a ): os.makedirs(__a ) a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)" , ) merge_eval(__a , __a , "pr_exact" ) merge_eval(__a , __a , "pr_f1" ) merge_eval(__a , __a , "pr_oracle" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: if not qid_list: return a__ : Optional[Any] = [na_probs[k] for k in qid_list] a__ : str = np.ones_like(__a ) / float(len(__a ) ) plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]: a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : Optional[Any] = num_no_ans a__ : Dict = cur_score a__ : Any = 0.0 a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) for i, qid in enumerate(__a ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : Optional[int] = scores[qid] else: if preds[qid]: a__ : str = -1 else: a__ : Union[str, Any] = 0 cur_score += diff if cur_score > best_score: a__ : Any = cur_score a__ : Dict = na_probs[qid] return 100.0 * best_score / len(__a ), best_thresh def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any: a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__ : Any = best_exact a__ : Any = exact_thresh a__ : List[Any] = best_fa a__ : Optional[int] = fa_thresh def UpperCamelCase_ ( ) -> Tuple: with open(OPTS.data_file ) as f: a__ : List[Any] = json.load(__a ) a__ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: a__ : int = json.load(__a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : List[str] = json.load(__a ) else: a__ : Optional[int] = {k: 0.0 for k in preds} a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v] a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] a__, a__ : str = get_raw_scores(__a , __a ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : Tuple = make_eval_dict(__a , __a ) if has_ans_qids: a__ : str = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "HasAns" ) if no_ans_qids: a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__a , __a , __a , __a , __a , __a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__a , __a ) else: print(json.dumps(__a , indent=2 ) ) if __name__ == "__main__": UpperCamelCase : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
37
1
from ..utils import DummyObject, requires_backends


class A__ ( metaclass=A__ ):
    """simple docstring"""

    _lowercase = ['note_seq']

    def __init__( self : List[str] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ):
        requires_backends(self , ["note_seq"] )

    @classmethod
    def _UpperCamelCase( cls : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : Union[str, Any] ):
        requires_backends(cls , ["note_seq"] )

    @classmethod
    def _UpperCamelCase( cls : List[Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : str ):
        requires_backends(cls , ["note_seq"] )
37
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CLIPTokenizer _lowercase = CLIPTokenizerFast _lowercase = True _lowercase = {} _lowercase = False def _UpperCamelCase( self : List[Any] ): super().setUp() # fmt: off a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a__ : Optional[Any] = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : int = "lower newer" a__ : Optional[int] = "lower newer" return input_text, output_text def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = "lower newer" a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = tokens + [tokenizer.unk_token] a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) @require_ftfy def _UpperCamelCase( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y" a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of space type a__ : str = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of line break type a__ : Union[str, Any] = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}''' a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) a__ : Optional[Any] = f''' {text}''' a__ : str = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) def _UpperCamelCase( self : int ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(lowerCamelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def _UpperCamelCase( self : int ): super().test_tokenization_python_rust_equals() def _UpperCamelCase( self : str ): # CLIP always lower cases letters pass
37
1
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def UpperCamelCase_ ( __a ) -> Optional[int]: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X20000 and cp <= 0X2a6df) # or (cp >= 0X2a700 and cp <= 0X2b73f) # or (cp >= 0X2b740 and cp <= 0X2b81f) # or (cp >= 0X2b820 and cp <= 0X2ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2f800 and cp <= 0X2fa1f) # ): # return True return False def UpperCamelCase_ ( __a ) -> Optional[int]: # word like '180' or '身高' or '神' for char in word: a__ : Optional[Any] = ord(__a ) if not _is_chinese_char(__a ): return 0 return 1 def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Optional[Any] = set() for token in tokens: a__ : Optional[Any] = len(__a ) > 1 and is_chinese(__a ) if chinese_word: word_set.add(__a ) a__ : Optional[Any] = list(__a ) return word_list def UpperCamelCase_ ( __a , __a ) -> Optional[int]: if not chinese_word_set: return bert_tokens a__ : int = max([len(__a ) for w in chinese_word_set] ) a__ : Tuple = bert_tokens a__, a__ : Union[str, Any] = 0, len(__a ) while start < end: a__ : Tuple = True if is_chinese(bert_word[start] ): a__ : Optional[int] = min(end - start , __a ) for i in range(__a , 1 , -1 ): a__ : Tuple = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): a__ : Union[str, Any] = "##" + bert_word[j] a__ : str = start + i a__ : Optional[Any] = False break if single_word: start += 1 return bert_word def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Any = [] for i in range(0 , len(__a ) , 100 ): a__ : List[str] = ltp_tokenizer.seg(lines[i : i + 100] )[0] a__ : Optional[Any] = [get_chinese_word(__a ) for r in res] ltp_res.extend(__a ) assert len(__a ) == len(__a ) a__ : List[Any] = [] for i in range(0 , len(__a ) , 100 ): a__ : List[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__a , truncation=__a , max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(__a ) == len(__a ) a__ : List[Any] = [] for input_ids, chinese_word in zip(__a , __a ): a__ : List[Any] = [] for id in input_ids: a__ : Tuple = bert_tokenizer._convert_id_to_token(__a ) input_tokens.append(__a ) a__ : List[str] = add_sub_symbol(__a , __a ) a__ : Tuple = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__a ): if token[:2] == "##": a__ : Optional[Any] = token[2:] # save chinese tokens' pos if len(__a ) == 1 and _is_chinese_char(ord(__a ) ): ref_id.append(__a ) ref_ids.append(__a ) assert len(__a ) == len(__a ) return ref_ids def UpperCamelCase_ ( __a ) -> Dict: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , "r" , encoding="utf-8" ) as f: a__ : int = f.readlines() a__ : Dict = [line.strip() for line in data if len(__a ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' a__ : List[Any] = LTP(args.ltp ) # faster in GPU device a__ : List[str] = BertTokenizer.from_pretrained(args.bert ) a__ : Optional[int] = prepare_ref(__a , __a , __a ) with open(args.save_path , "w" , encoding="utf-8" ) as f: a__ : List[str] = [json.dumps(__a ) + "\n" for ref in ref_ids] f.writelines(__a ) if __name__ == "__main__": UpperCamelCase : List[str] = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""" ) parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""") parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""") UpperCamelCase : Dict = parser.parse_args() main(args)
37
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase : List[Any] = """======= >>>>>>> """ UpperCamelCase : Optional[Any] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase : Any = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def UpperCamelCase_ ( __a ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class A__ ( A__ ): """simple docstring""" @staticmethod def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ): a__ : List[str] = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ): a__ : str = get_logger("datasets-cli/converting" ) a__ : Optional[Any] = tfds_path a__ : Optional[int] = datasets_directory def _UpperCamelCase( self : int ): if os.path.isdir(self._tfds_path ): a__ : List[str] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): a__ : Any = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) a__ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) a__ : Tuple = [] a__ : str = [] a__ : List[Any] = {} if os.path.isdir(self._tfds_path ): a__ : List[str] = os.listdir(lowerCamelCase__ ) else: a__ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(lowerCamelCase__ , encoding="utf-8" ) as f: a__ : List[Any] = f.readlines() a__ : Union[str, Any] = [] a__ : Union[str, Any] = False a__ : Union[str, Any] = False a__ : Dict = [] for line in lines: a__ : Optional[Any] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: a__ : List[Any] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here a__ : List[str] = "" continue elif "from absl import logging" in out_line: a__ : Dict = "from datasets import logging\n" elif "getLogger" in out_line: a__ : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): a__ : List[str] = True a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) a__ : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: a__ : Optional[int] = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset a__ : Dict = f_name.replace(".py" , "" ) a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: a__ : Any = os.path.basename(lowerCamelCase__ ) a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ , lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
37
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : int = {} class A__ ( A__ ): """simple docstring""" _lowercase = 'llama' _lowercase = ['past_key_values'] def __init__( self : Dict , lowerCamelCase__ : str=32_000 , lowerCamelCase__ : Dict=4_096 , lowerCamelCase__ : Tuple=11_008 , lowerCamelCase__ : str=32 , lowerCamelCase__ : Tuple=32 , lowerCamelCase__ : int=None , lowerCamelCase__ : Union[str, Any]="silu" , lowerCamelCase__ : Any=2_048 , lowerCamelCase__ : Tuple=0.02 , lowerCamelCase__ : Dict=1E-6 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Optional[int]=None , **lowerCamelCase__ : Union[str, Any] , ): a__ : List[str] = vocab_size a__ : str = max_position_embeddings a__ : Dict = hidden_size a__ : List[str] = intermediate_size a__ : Dict = num_hidden_layers a__ : Optional[Any] = num_attention_heads # for backward compatibility if num_key_value_heads is None: a__ : Tuple = num_attention_heads a__ : str = num_key_value_heads a__ : Dict = hidden_act a__ : Optional[int] = initializer_range a__ : str = rms_norm_eps a__ : Optional[Any] = pretraining_tp a__ : int = use_cache a__ : Union[str, Any] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , tie_word_embeddings=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Any ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowerCamelCase__ ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f'''got {self.rope_scaling}''' ) a__ : Tuple = self.rope_scaling.get("type" , lowerCamelCase__ ) a__ : Tuple = self.rope_scaling.get("factor" , lowerCamelCase__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
37
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Any = self.file.__enter__ class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = file_ def __enter__( self : str ): self._file.__enter__() return self def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ): self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ ) def __iter__( self : List[str] ): return iter(self._file ) def _UpperCamelCase( self : Any ): return next(self._file ) def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ): return getattr(self._file , lowerCamelCase__ ) def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ): return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) ) a__ : Any = fixed_enter
37
1
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CanineTokenizer _lowercase = False def _UpperCamelCase( self : Dict ): super().setUp() a__ : str = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _UpperCamelCase( self : Union[str, Any] ): return CanineTokenizer.from_pretrained("google/canine-s" ) def _UpperCamelCase( self : Optional[Any] , **lowerCamelCase__ : Tuple ): a__ : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) a__ : Optional[int] = 1_024 return tokenizer @require_torch def _UpperCamelCase( self : Tuple ): a__ : Dict = self.canine_tokenizer a__ : Union[str, Any] = ["Life is like a box of chocolates.", "You never know what you're gonna get."] # fmt: off a__ : Dict = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0] # fmt: on a__ : str = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) a__ : str = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def _UpperCamelCase( self : Optional[int] ): a__ : Optional[Any] = self.canine_tokenizer a__ : List[Any] = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."] a__ : Any = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn("input_ids" , lowerCamelCase__ ) self.assertIn("attention_mask" , lowerCamelCase__ ) self.assertIn("token_type_ids" , lowerCamelCase__ ) @require_torch def _UpperCamelCase( self : List[str] ): a__ : int = self.canine_tokenizer a__ : List[Any] = [ "What's the weater?", "It's about 25 degrees.", ] a__ : Dict = tokenizer( text_target=lowerCamelCase__ , max_length=32 , padding="max_length" , truncation=lowerCamelCase__ , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) def _UpperCamelCase( self : int ): # safety check on max_len default value so we are sure the test works a__ : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test a__ : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc a__ : Optional[Any] = tempfile.mkdtemp() a__ : int = " He is very happy, UNwant\u00E9d,running" a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) tokenizer.save_pretrained(lowerCamelCase__ ) a__ : int = tokenizer.__class__.from_pretrained(lowerCamelCase__ ) a__ : Optional[int] = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) 
shutil.rmtree(lowerCamelCase__ ) a__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc a__ : Tuple = tempfile.mkdtemp() a__ : int = " He is very happy, UNwant\u00E9d,running" a__ : Any = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: a__ : List[Any] = chr(0XE007 ) additional_special_tokens.append(lowerCamelCase__ ) tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} ) a__ : str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) tokenizer.save_pretrained(lowerCamelCase__ ) a__ : Tuple = tokenizer.__class__.from_pretrained(lowerCamelCase__ ) a__ : List[Any] = after_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertIn(lowerCamelCase__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) a__ : List[str] = tokenizer.__class__.from_pretrained(lowerCamelCase__ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : Optional[int] = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__, a__ : Tuple = self.get_clean_sequence(lowerCamelCase__ ) # a special token for Canine can be defined as follows: a__ : List[Any] = 0XE005 a__ : Optional[Any] = chr(lowerCamelCase__ ) tokenizer.add_special_tokens({"cls_token": special_token} ) a__ : str = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) , 1 ) a__ : Optional[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCamelCase__ ) a__ : Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , input_encoded + special_token_id ) a__ : List[Any] = tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ ) self.assertTrue(special_token not in decoded ) def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__ : List[Any] = chr(0XE005 ) a__ : str = chr(0XE006 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCamelCase__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} ) a__ : Dict = tokenizer.tokenize(lowerCamelCase__ ) a__ : int = tokenizer.tokenize(lowerCamelCase__ ) self.assertEqual(len(lowerCamelCase__ ) , 1 ) self.assertEqual(len(lowerCamelCase__ ) , 1 ) self.assertEqual(token_a[0] , lowerCamelCase__ ) self.assertEqual(token_a[0] , lowerCamelCase__ ) @require_tokenizers def _UpperCamelCase( self : Tuple ): a__ : Any = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: a__ : List[Any] = 0XE006 a__ : Union[str, Any] = chr(lowerCamelCase__ ) a__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ ) tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(lowerCamelCase__ ) tokenizer.from_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file: a__ : Dict = json.load(lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file: a__ : Tuple = json.load(lowerCamelCase__ ) # a special token for Canine can be defined as follows: a__ : Optional[int] = 0XE006 a__ : Tuple = chr(lowerCamelCase__ ) a__ : List[Any] = [new_token_a] a__ : Optional[Any] = [new_token_a] with open(os.path.join(lowerCamelCase__ , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(lowerCamelCase__ , lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile: json.dump(lowerCamelCase__ , lowerCamelCase__ ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files a__ : Any = tokenizer_class.from_pretrained(lowerCamelCase__ , extra_ids=0 ) self.assertIn(lowerCamelCase__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) a__ : Optional[Any] = 0XE007 a__ : str = chr(lowerCamelCase__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained a__ : List[str] = [AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ )] a__ : List[str] = tokenizer_class.from_pretrained( lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , extra_ids=0 ) self.assertIn(lowerCamelCase__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def _UpperCamelCase( self : Union[str, Any] ): a__ : int = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__ : Optional[Any] = "hello world" if self.space_between_special_tokens: a__ : Any = "[CLS] hello world [SEP]" else: a__ : str = input a__ : Any = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Dict = tokenizer.decode(lowerCamelCase__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(lowerCamelCase__ , [output, output.lower()] ) def _UpperCamelCase( self : Dict ): a__ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__ : int = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] a__ : Optional[Any] = "a" a__ : Optional[Any] = ord(lowerCamelCase__ ) for attr in attributes_list: setattr(lowerCamelCase__ , attr + "_id" , lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ , attr + "_id" ) , lowerCamelCase__ ) setattr(lowerCamelCase__ , attr + "_id" , lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(getattr(lowerCamelCase__ , attr + "_id" ) , lowerCamelCase__ ) setattr(lowerCamelCase__ , "additional_special_tokens_ids" , [] ) self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens" ) , [] ) self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens_ids" ) , [] ) a__ : List[Any] = 0XE006 a__ : Dict = chr(lowerCamelCase__ ) setattr(lowerCamelCase__ , "additional_special_tokens_ids" , [additional_special_token_id] ) self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens" ) , [additional_special_token] ) self.assertListEqual(getattr(lowerCamelCase__ , "additional_special_tokens_ids" ) , [additional_special_token_id] ) def _UpperCamelCase( self : str ): pass def _UpperCamelCase( self : int ): pass def _UpperCamelCase( self : Any ): pass def _UpperCamelCase( self : Any ): pass def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : List[str] ): pass 
def _UpperCamelCase( self : List[Any] ): pass def _UpperCamelCase( self : Optional[int] ): pass
37
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") a__ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__a ): os.makedirs(__a ) a__ : Any = model.state_dict() def to_tf_var_name(__a ): for patt, repl in iter(__a ): a__ : Tuple = name.replace(__a , __a ) return f'''bert/{name}''' def create_tf_var(__a , __a , __a ): a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype ) a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: a__ : int = to_tf_var_name(__a ) a__ : Union[str, Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): a__ : int = torch_tensor.T a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a ) tf.keras.backend.set_value(__a , __a ) a__ : int = session.run(__a ) print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' ) a__ : Any = tf.train.Saver(tf.trainable_variables() ) saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) ) def UpperCamelCase_ ( __a=None ) -> int: a__ : Dict = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" ) a__ : Optional[Any] = parser.parse_args(__a ) a__ : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
37
1
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: UpperCamelCase : Dict = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : str , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str=7 , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : Optional[int]=18 , lowerCamelCase__ : str=30 , lowerCamelCase__ : List[str]=400 , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[Any]=None , ): a__ : Dict = size if size is not None else {"height": 20, "width": 20} a__ : Optional[int] = parent a__ : Tuple = batch_size a__ : int = num_channels a__ : Any = image_size a__ : Tuple = min_resolution a__ : Tuple = max_resolution a__ : Union[str, Any] = size a__ : str = do_normalize a__ : List[Any] = do_convert_rgb a__ : str = [512, 1_024, 2_048, 4_096] a__ : str = patch_size if patch_size is not None else {"height": 16, "width": 16} def _UpperCamelCase( self : Dict ): return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def _UpperCamelCase( self : int ): a__ : Union[str, Any] = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" a__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' 
, ) @require_torch @require_vision class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = PixaStructImageProcessor if is_vision_available() else None def _UpperCamelCase( self : Dict ): a__ : Optional[int] = PixaStructImageProcessingTester(self ) @property def _UpperCamelCase( self : Any ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase( self : int ): a__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_convert_rgb" ) ) def _UpperCamelCase( self : List[Any] ): a__ : List[str] = self.image_processor_tester.prepare_dummy_image() a__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) a__ : Union[str, Any] = 2_048 a__ : int = image_processor(lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) ) def _UpperCamelCase( self : List[Any] ): # Initialize image_processor a__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input a__ : Optional[int] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input a__ : Any = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched a__ : Tuple = image_processor( lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _UpperCamelCase( self : Any ): # Initialize image_processor a__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input a__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 a__ : int = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(lowerCamelCase__ ): a__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches a__ : List[Any] = "Hello" a__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ , header_text=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched a__ : str = image_processor( lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ , header_text=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _UpperCamelCase( self : Optional[Any] ): # Initialize image_processor a__ 
: Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) a__ : Any = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input a__ : Optional[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched a__ : Optional[int] = image_processor( lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def _UpperCamelCase( self : Tuple ): # Initialize image_processor a__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input a__ : Optional[Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input a__ : str = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched a__ : List[str] = image_processor( lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' 
, ) @require_torch @require_vision class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = PixaStructImageProcessor if is_vision_available() else None def _UpperCamelCase( self : Union[str, Any] ): a__ : List[str] = PixaStructImageProcessingTester(self , num_channels=4 ) a__ : List[str] = 3 @property def _UpperCamelCase( self : List[str] ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase( self : Optional[Any] ): a__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_convert_rgb" ) ) def _UpperCamelCase( self : str ): # Initialize image_processor a__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input a__ : Optional[Any] = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input a__ : Any = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched a__ : str = image_processor( lowerCamelCase__ , return_tensors="pt" , max_patches=lowerCamelCase__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
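# A self-contained numpy illustration (not the library implementation) of why the
# expected hidden dimension in the tests above is patch_h * patch_w * channels + 2:
# each flattened patch row is assumed to carry its row and column index in front of
# the raw pixel values. Image and patch sizes below are made up.
import numpy as np

patch_h, patch_w, channels = 16, 16, 3
image = np.random.rand(64, 48, channels)  # hypothetical small image

rows, cols = image.shape[0] // patch_h, image.shape[1] // patch_w
patches = []
for r in range(rows):
    for c in range(cols):
        block = image[r * patch_h:(r + 1) * patch_h, c * patch_w:(c + 1) * patch_w]
        patches.append(np.concatenate(([r + 1, c + 1], block.reshape(-1))))
flattened = np.stack(patches)

assert flattened.shape == (rows * cols, patch_h * patch_w * channels + 2)
print(flattened.shape)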
37
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Dict = patch_size a__ : List[Any] = max_length a__ : str = num_mel_bins a__ : Optional[Any] = is_training a__ : Optional[int] = use_labels a__ : List[Any] = hidden_size a__ : str = num_hidden_layers a__ : Any = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Tuple = attention_probs_dropout_prob a__ : List[Any] = type_sequence_label_size a__ : Any = initializer_range a__ : str = scope a__ : List[str] = frequency_stride a__ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 a__ : Tuple = frequency_out_dimension * time_out_dimension a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : List[str] ): a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a__ : List[Any] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = self.get_config() return config, input_values, labels def _UpperCamelCase( self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , 
lowerCamelCase__ : Optional[int] ): a__ : List[Any] = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : str ): a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ) : Optional[int] = config_and_inputs a__ : List[Any] = {"input_values": input_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowercase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _UpperCamelCase( self : str ): a__ : str = ASTModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : Optional[int] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : Tuple ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Optional[Any] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : Optional[int] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a__, a__ : List[str] = torchaudio.load(__a ) return audio, sampling_rate @require_torch @require_torchaudio class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : List[str] ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _UpperCamelCase( self : Optional[int] ): a__ : int = self.default_feature_extractor a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" 
).to(lowerCamelCase__ ) a__ : Any = self.default_feature_extractor a__, a__ : Dict = prepare_audio() a__ : str = audio.squeeze().numpy() a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
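# The patch arithmetic from the AST model tester above, written out with its default
# values (patch_size=2, num_mel_bins=16, max_length=24, both strides=2); the "+ 2"
# accounts for the [CLS] and distillation tokens prepended to the patch sequence.
patch_size, num_mel_bins, max_length = 2, 16, 24
frequency_stride, time_stride = 2, 2

frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 8
time_out = (max_length - patch_size) // time_stride + 1              # 12
num_patches = frequency_out * time_out                               # 96
seq_length = num_patches + 2                                         # 98

print(frequency_out, time_out, num_patches, seq_length)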
37
1
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = 'ssube/stable-diffusion-x4-upscaler-onnx' def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str=0 ): a__ : List[str] = floats_tensor((1, 3, 128, 128) , rng=random.Random(lowerCamelCase__ ) ) a__ : Optional[int] = torch.manual_seed(lowerCamelCase__ ) a__ : Dict = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def _UpperCamelCase( self : List[str] ): a__ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Optional[Any] = self.get_dummy_inputs() a__ : Any = pipe(**lowerCamelCase__ ).images a__ : Dict = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) a__ : Union[str, Any] = np.array( [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a__ : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : List[Any] = self.get_dummy_inputs() a__ : Optional[Any] = pipe(**lowerCamelCase__ ).images a__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) a__ : Tuple = np.array( [0.689_8892, 0.5924_0556, 0.5249_9527, 0.5886_6215, 0.5225_8235, 0.5257_2715, 0.6241_4473, 0.617_4387, 0.621_4964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _UpperCamelCase( self : List[Any] ): a__ : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a__ : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : List[Any] = self.get_dummy_inputs() a__ : Dict = pipe(**lowerCamelCase__ ).images a__ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) a__ : Optional[int] = np.array( [0.765_9278, 0.7643_7664, 0.7557_9107, 0.769_1116, 0.7766_6986, 0.772_7672, 0.775_8664, 0.781_2226, 0.7694_2515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _UpperCamelCase( self : List[str] ): a__ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a__ : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Any = self.get_dummy_inputs() a__ : List[Any] = pipe(**lowerCamelCase__ ).images a__ : List[Any] = image[0, -3:, -3:, -1] 
assert image.shape == (1, 512, 512, 3) a__ : Optional[int] = np.array( [0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def _UpperCamelCase( self : Any ): a__ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) a__ : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Optional[int] = self.get_dummy_inputs() a__ : int = pipe(**lowerCamelCase__ ).images a__ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) a__ : Optional[int] = np.array( [0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" @property def _UpperCamelCase( self : Any ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _UpperCamelCase( self : Any ): a__ : Any = ort.SessionOptions() a__ : Tuple = False return options def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) a__ : int = init_image.resize((128, 128) ) # using the PNDM scheduler by default a__ : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Any = "A fantasy landscape, trending on artstation" a__ : Dict = torch.manual_seed(0 ) a__ : Tuple = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type="np" , ) a__ : Any = output.images a__ : Any = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) a__ : List[str] = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _UpperCamelCase( self : Optional[int] ): a__ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) a__ : Optional[Any] = init_image.resize((128, 128) ) a__ : Optional[int] = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" ) a__ : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Optional[int] = "A fantasy landscape, trending on artstation" a__ : Optional[int] = torch.manual_seed(0 ) a__ : Tuple = pipe( prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type="np" , ) a__ : Dict = output.images a__ : Tuple = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) a__ : Tuple = np.array( [0.5017_3753, 
0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
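# A minimal, self-contained version of the slice-comparison pattern the pipeline tests
# above rely on: take a small corner of the output image and compare it to a reference
# slice with an absolute tolerance. The arrays here are synthetic, not real pipeline
# outputs.
import numpy as np

image = np.random.RandomState(0).rand(1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]

expected_slice = image_slice.flatten() + 1e-3  # stand-in reference values
max_abs_diff = np.abs(image_slice.flatten() - expected_slice).max()

assert image.shape == (1, 512, 512, 3)
assert max_abs_diff < 1e-1, f"max abs diff {max_abs_diff} exceeds tolerance"
print(f"max abs diff: {max_abs_diff:.2e}")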
37
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
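# A simplified sketch (not the real SentencePiece decoder) of what the piece lists in
# the tokenization test above mean: "▁" marks a word boundary, so joining the pieces
# and mapping "▁" back to a space reconstructs the original text.
SPIECE_UNDERLINE = "▁"


def detokenize(pieces):
    return "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip()


pieces = [
    SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
    SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
]
print(detokenize(pieces))  # "I was born in 92000,"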
37
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def UpperCamelCase_ ( __a ) -> Tuple: a__ : Optional[Any] = 384 if "tiny" in model_name: a__ : Optional[int] = [3, 3, 9, 3] a__ : Optional[int] = [96, 192, 384, 768] if "small" in model_name: a__ : Optional[Any] = [3, 3, 27, 3] a__ : str = [96, 192, 384, 768] if "base" in model_name: a__ : Union[str, Any] = [3, 3, 27, 3] a__ : Any = [128, 256, 512, 1_024] a__ : Tuple = 512 if "large" in model_name: a__ : List[str] = [3, 3, 27, 3] a__ : List[str] = [192, 384, 768, 1_536] a__ : int = 768 if "xlarge" in model_name: a__ : Any = [3, 3, 27, 3] a__ : int = [256, 512, 1_024, 2_048] a__ : Optional[int] = 1_024 # set label information a__ : List[Any] = 150 a__ : Optional[Any] = "huggingface/label-files" a__ : Dict = "ade20k-id2label.json" a__ : List[Any] = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) a__ : Optional[int] = {int(__a ): v for k, v in idalabel.items()} a__ : Dict = {v: k for k, v in idalabel.items()} a__ : str = ConvNextConfig( depths=__a , hidden_sizes=__a , out_features=["stage1", "stage2", "stage3", "stage4"] ) a__ : List[str] = UperNetConfig( backbone_config=__a , auxiliary_in_channels=__a , num_labels=__a , idalabel=__a , labelaid=__a , ) return config def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Optional[int] = [] # fmt: off # stem rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") ) rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") ) rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") ) rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') ) rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') ) if i > 0: rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') ) 
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') ) rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') ) # decode head rename_keys.extend( [ ("decode_head.conv_seg.weight", "decode_head.classifier.weight"), ("decode_head.conv_seg.bias", "decode_head.classifier.bias"), ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"), ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"), ] ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : str = dct.pop(__a ) a__ : List[Any] = val def UpperCamelCase_ ( __a , __a , __a ) -> str: a__ : Union[str, Any] = { "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth", "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth", "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth", "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth", "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth", } a__ : Tuple = model_name_to_url[model_name] a__ : List[Any] = torch.hub.load_state_dict_from_url(__a , map_location="cpu" )["state_dict"] a__ : List[Any] = get_upernet_config(__a ) a__ : Dict = UperNetForSemanticSegmentation(__a ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): a__ : Tuple = state_dict.pop(__a ) if "bn" in key: a__ : List[Any] = key.replace("bn" , "batch_norm" ) a__ : Dict = val # rename keys a__ : str = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) model.load_state_dict(__a ) # verify on image a__ : Optional[Any] = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg" a__ : int = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) a__ : Union[str, Any] = SegformerImageProcessor() a__ : Dict = processor(__a , return_tensors="pt" ).pixel_values with torch.no_grad(): a__ : Optional[int] = model(__a ) if model_name == "upernet-convnext-tiny": a__ : Tuple = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": a__ : str = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": a__ : List[str] = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": 
a__ : Dict = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": a__ : Optional[Any] = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print("Logits:" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1e-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__a ) print(f'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__a ) if push_to_hub: print(f'''Pushing model and processor for {model_name} to hub''' ) model.push_to_hub(f'''openmmlab/{model_name}''' ) processor.push_to_hub(f'''openmmlab/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-convnext-tiny""", type=str, choices=[f"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]], help="""Name of the ConvNext UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) UpperCamelCase : Optional[int] = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
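# A toy version of the rename_key helper used in the conversion above: pop the tensor
# stored under the old key and re-insert it under the new name. The keys and values
# below are made up; real state dicts hold tensors.
def rename_key_demo(dct, old, new):
    dct[new] = dct.pop(old)


toy_state_dict = {"backbone.norm0.weight": [1.0], "decode_head.conv_seg.bias": [0.0]}
rename_key_demo(toy_state_dict, "backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")
rename_key_demo(toy_state_dict, "decode_head.conv_seg.bias", "decode_head.classifier.bias")
print(sorted(toy_state_dict))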
37
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
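# A minimal sketch of the q/v bias handling performed by read_in_q_v_bias above: the
# original checkpoint stores separate q and v biases, and the converted qkv projection
# expects one concatenated bias, so the k slot is filled with zeros. Sizes are toy
# values; requires torch.
import torch

hidden = 4
q_bias = torch.arange(hidden, dtype=torch.float32)
v_bias = -torch.arange(hidden, dtype=torch.float32)

qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
print(qkv_bias.shape)  # torch.Size([12]) == 3 * hidden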
37
1
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def UpperCamelCase_ ( __a ) -> Dict: a__ : Optional[int] = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] a__ : Optional[int] = True if "large" in model_name or "huge" in model_name else False a__ : List[str] = True if "large" in model_name or "huge" in model_name else False a__ : int = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: a__ : Optional[int] = [3, 3, 3, 3] a__ : Optional[int] = [5, 5, 5, 5] elif "fl4" in model_name: a__ : int = [4, 4, 4, 4] a__ : Any = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: a__ : Union[str, Any] = [3, 3, 3, 3] if "lrf" in model_name: a__ : Any = [3, 3, 3, 3] else: a__ : Dict = [2, 2, 2, 2] if "tiny" in model_name: a__ : Dict = 96 elif "small" in model_name: a__ : Dict = 96 elif "base" in model_name: a__ : Dict = 128 elif "large" in model_name: a__ : Tuple = 192 elif "xlarge" in model_name: a__ : Union[str, Any] = 256 elif "huge" in model_name: a__ : str = 352 # set label information a__ : Union[str, Any] = "huggingface/label-files" if "large" in model_name or "huge" in model_name: a__ : str = "imagenet-22k-id2label.json" else: a__ : List[str] = "imagenet-1k-id2label.json" a__ : Tuple = json.load(open(hf_hub_download(__a , __a , repo_type="dataset" ) , "r" ) ) a__ : Optional[Any] = {int(__a ): v for k, v in idalabel.items()} a__ : Optional[Any] = {v: k for k, v in idalabel.items()} a__ : Optional[int] = FocalNetConfig( embed_dim=__a , depths=__a , focal_levels=__a , focal_windows=__a , use_conv_embed=__a , idalabel=__a , labelaid=__a , use_post_layernorm=__a , use_layerscale=__a , ) return config def UpperCamelCase_ ( __a ) -> List[str]: if "patch_embed.proj" in name: a__ : Optional[int] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a__ : Any = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: a__ : Optional[int] = "encoder." + name if "encoder.layers" in name: a__ : Dict = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: a__ : Tuple = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: a__ : str = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: a__ : int = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: a__ : Union[str, Any] = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: a__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": a__ : str = "layernorm.weight" if name == "norm.bias": a__ : List[str] = "layernorm.bias" if "head" in name: a__ : Union[str, Any] = name.replace("head" , "classifier" ) else: a__ : Dict = "focalnet." 
+ name return name def UpperCamelCase_ ( __a , __a , __a=False ) -> List[Any]: # fmt: off a__ : Dict = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on a__ : Any = model_name_to_url[model_name] print("Checkpoint URL: " , __a ) a__ : str = torch.hub.load_state_dict_from_url(__a , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): a__ : Optional[int] = state_dict.pop(__a ) a__ : Optional[Any] = val a__ : int = get_focalnet_config(__a ) a__ : Tuple = FocalNetForImageClassification(__a ) model.eval() # load state dict model.load_state_dict(__a ) # verify conversion a__ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" a__ : Any = BitImageProcessor( do_resize=__a , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=__a , crop_size=224 , do_normalize=__a , image_mean=__a , image_std=__a , ) a__ : Optional[int] = Image.open(requests.get(__a , stream=__a ).raw ) a__ : Union[str, Any] = processor(images=__a , return_tensors="pt" ) a__ : Optional[int] = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) a__ : List[Any] = image_transforms(__a ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , __a , atol=1e-4 ) a__ : Tuple = model(**__a ) a__ : Dict = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": a__ : str = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": a__ : int = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": a__ : Dict = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": a__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": a__ : List[Any] = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": a__ : Tuple = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) print("Looks ok!" 
) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__a ) processor.save_pretrained(__a ) if push_to_hub: print(f'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(f'''{model_name}''' ) processor.push_to_hub(f'''{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""focalnet-tiny""", type=str, help="""Name of the FocalNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub.""", ) UpperCamelCase : List[str] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
37
def UpperCamelCase_ ( __a , __a ) -> Tuple: a__ : Optional[int] = [0 for i in range(r + 1 )] # nc0 = 1 a__ : Union[str, Any] = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. a__ : Any = min(__a , __a ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
37
1
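For readability, here is a minimal sketch of the bottom-up binomial-coefficient routine that the sample above appears to implement; the descriptive names (binomial_coefficient, n, r, row) are assumptions chosen for clarity rather than identifiers taken from the sample.

def binomial_coefficient(n: int, r: int) -> int:
    # row[j] holds C(i, j) after processing row i of Pascal's triangle
    row = [0] * (r + 1)
    row[0] = 1  # C(i, 0) == 1 for every row
    for i in range(1, n + 1):
        # sweep right-to-left so row[j - 1] still holds the previous row's value
        for j in range(min(i, r), 0, -1):
            row[j] += row[j - 1]
    return row[r]

print(binomial_coefficient(10, 5))  # expected output: 252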
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = DanceDiffusionPipeline _lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS _lowercase = PipelineTesterMixin.required_optional_params - { 'callback', 'latents', 'callback_steps', 'output_type', 'num_images_per_prompt', } _lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): torch.manual_seed(0 ) a__ : Tuple = UNetaDModel( block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCamelCase__ , use_timestep_embedding=lowerCamelCase__ , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , ) a__ : int = IPNDMScheduler() a__ : List[Any] = { "unet": unet, "scheduler": scheduler, } return components def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=0 ): if str(lowerCamelCase__ ).startswith("mps" ): a__ : List[Any] = torch.manual_seed(lowerCamelCase__ ) else: a__ : List[str] = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) a__ : Dict = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def _UpperCamelCase( self : Tuple ): a__ : str = "cpu" # ensure determinism for the device-dependent torch.Generator a__ : Optional[int] = self.get_dummy_components() a__ : Any = DanceDiffusionPipeline(**lowerCamelCase__ ) a__ : str = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Any = self.get_dummy_inputs(lowerCamelCase__ ) a__ : Tuple = pipe(**lowerCamelCase__ ) a__ : str = output.audios a__ : int = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) a__ : int = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def _UpperCamelCase( self : Optional[int] ): return super().test_save_load_local() @skip_mps def _UpperCamelCase( self : Dict ): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def _UpperCamelCase( self : List[Any] ): return super().test_save_load_optional_components() @skip_mps def _UpperCamelCase( self : Tuple ): return super().test_attention_slicing_forward_pass() def _UpperCamelCase( self : str ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : Tuple = torch_device a__ : Optional[int] = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" ) a__ : int = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : 
Optional[int] = torch.manual_seed(0 ) a__ : List[Any] = pipe(generator=lowerCamelCase__ , num_inference_steps=100 , audio_length_in_s=4.096 ) a__ : Optional[Any] = output.audios a__ : Optional[Any] = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a__ : Any = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = torch_device a__ : List[Any] = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa ) a__ : str = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Optional[Any] = torch.manual_seed(0 ) a__ : Optional[int] = pipe(generator=lowerCamelCase__ , num_inference_steps=100 , audio_length_in_s=4.096 ) a__ : int = output.audios a__ : List[str] = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) a__ : List[Any] = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
37
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
1
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCamelCase : Dict = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : Optional[int] ): super().__init__() a__ : Union[str, Any] = torchvision.models.resnetaaa(pretrained=lowerCamelCase__ ) a__ : Optional[Any] = list(model.children() )[:-2] a__ : List[str] = nn.Sequential(*lowerCamelCase__ ) a__ : List[str] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : List[Any] ): # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 a__ : Dict = self.pool(self.model(lowerCamelCase__ ) ) a__ : Tuple = torch.flatten(lowerCamelCase__ , start_dim=2 ) a__ : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class A__ ( A__ ): """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ): a__ : List[Any] = [json.loads(lowerCamelCase__ ) for l in open(lowerCamelCase__ )] a__ : int = os.path.dirname(lowerCamelCase__ ) a__ : Dict = tokenizer a__ : int = labels a__ : Optional[int] = len(lowerCamelCase__ ) a__ : int = max_seq_length a__ : Optional[Any] = transforms def __len__( self : List[Any] ): return len(self.data ) def __getitem__( self : Optional[int] , lowerCamelCase__ : Any ): a__ : Any = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=lowerCamelCase__ ) ) a__, a__, a__ : str = sentence[0], sentence[1:-1], sentence[-1] a__ : Any = sentence[: self.max_seq_length] a__ : int = torch.zeros(self.n_classes ) a__ : int = 1 a__ : Optional[Any] = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" ) a__ : int = self.transforms(lowerCamelCase__ ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _UpperCamelCase( self : Tuple ): a__ : Dict = Counter() for row in self.data: label_freqs.update(row["label"] ) return label_freqs def UpperCamelCase_ ( __a ) -> Any: a__ : Optional[Any] = [len(row["sentence"] ) for row in batch] a__, a__ : Optional[Any] = len(__a ), max(__a ) a__ : Optional[Any] = torch.zeros(__a , __a , dtype=torch.long ) a__ : str = torch.zeros(__a , __a , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(__a , __a ) ): a__ : str = input_row["sentence"] a__ : Tuple = 1 a__ : Optional[Any] = torch.stack([row["image"] for row in batch] ) a__ : Any = torch.stack([row["label"] for row in batch] ) a__ : Dict = torch.stack([row["image_start_token"] for row in batch] ) a__ : Optional[Any] = torch.stack([row["image_end_token"] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def UpperCamelCase_ ( ) -> Tuple: return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def UpperCamelCase_ ( ) -> Any: return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), 
transforms.Normalize( mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ), ] )
37
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
37
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase : Optional[int] = logging.get_logger(__name__) UpperCamelCase : str = { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json""" ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class A__ ( A__ ): """simple docstring""" _lowercase = 'roformer' def __init__( self : Union[str, Any] , lowerCamelCase__ : List[Any]=50_000 , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : int=768 , lowerCamelCase__ : List[Any]=12 , lowerCamelCase__ : Dict=12 , lowerCamelCase__ : int=3_072 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Union[str, Any]=1_536 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : Union[str, Any]=0.02 , lowerCamelCase__ : List[str]=1E-12 , lowerCamelCase__ : Optional[Any]=0 , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : str=True , **lowerCamelCase__ : str , ): super().__init__(pad_token_id=lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = vocab_size a__ : Optional[Any] = hidden_size if embedding_size is None else embedding_size a__ : List[str] = hidden_size a__ : Any = num_hidden_layers a__ : Optional[int] = num_attention_heads a__ : List[str] = hidden_act a__ : Tuple = intermediate_size a__ : Any = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : str = max_position_embeddings a__ : Any = type_vocab_size a__ : int = initializer_range a__ : Optional[Any] = layer_norm_eps a__ : Optional[int] = rotary_value a__ : str = use_cache class A__ ( A__ ): """simple docstring""" @property def _UpperCamelCase( self : List[Any] ): if self.task == "multiple-choice": a__ : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: a__ : List[str] = {0: "batch", 1: "sequence"} a__ : Optional[int] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
37
from statistics import mean, stdev def UpperCamelCase_ ( __a , __a = 3 ) -> list: a__ : List[str] = min(__a ) a__ : str = max(__a ) # normalize data return [round((x - x_min) / (x_max - x_min) , __a ) for x in data] def UpperCamelCase_ ( __a , __a = 3 ) -> list: a__ : str = mean(__a ) a__ : List[str] = stdev(__a ) # standardize data return [round((x - mu) / (sigma) , __a ) for x in data]
37
1
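A short usage sketch for the two rescaling helpers in the sample above (min-max normalization and z-score standardization); the plainly named functions below are an assumed reading of that sample, included only to show the expected behaviour.

from statistics import mean, stdev

def normalization(data: list, ndigits: int = 3) -> list:
    # min-max scaling: smallest value maps to 0.0, largest to 1.0
    x_min, x_max = min(data), max(data)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]

def standardization(data: list, ndigits: int = 3) -> list:
    # z-score scaling: zero mean, unit sample standard deviation
    mu, sigma = mean(data), stdev(data)
    return [round((x - mu) / sigma, ndigits) for x in data]

print(normalization([2, 7, 10, 20, 30, 50]))    # [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
print(standardization([2, 7, 10, 20, 30, 50]))  # values centered on 0 with stdev ~1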
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class A__ ( A__ , A__ ): """simple docstring""" _lowercase = 1 @register_to_config def __init__( self : Optional[Any] , lowerCamelCase__ : int=2_000 , lowerCamelCase__ : List[str]=0.1 , lowerCamelCase__ : Tuple=20 , lowerCamelCase__ : Dict=1E-3 ): a__ : Union[str, Any] = None a__ : Dict = None a__ : List[str] = None def _UpperCamelCase( self : str , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, torch.device] = None ): a__ : Optional[Any] = torch.linspace(1 , self.config.sampling_eps , lowerCamelCase__ , device=lowerCamelCase__ ) def _UpperCamelCase( self : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple=None ): if self.timesteps is None: raise ValueError( "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score a__ : Optional[int] = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) a__ : Optional[Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) a__ : Any = std.flatten() while len(std.shape ) < len(score.shape ): a__ : Union[str, Any] = std.unsqueeze(-1 ) a__ : int = -score / std # compute a__ : List[Any] = -1.0 / len(self.timesteps ) a__ : List[str] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) a__ : Any = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): a__ : int = beta_t.unsqueeze(-1 ) a__ : Any = -0.5 * beta_t * x a__ : Tuple = torch.sqrt(lowerCamelCase__ ) a__ : int = drift - diffusion**2 * score a__ : int = x + drift * dt # add noise a__ : Optional[int] = randn_tensor(x.shape , layout=x.layout , generator=lowerCamelCase__ , device=x.device , dtype=x.dtype ) a__ : str = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__( self : Any ): return self.config.num_train_timesteps
37
def UpperCamelCase_ ( __a = 50 ) -> int: a__ : Tuple = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f"""{solution() = }""")
37
1
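The tiling sample above is a Project Euler 116-style count (ways to replace runs of black squares with coloured tiles of length 2, 3 or 4, one colour at a time); the restatement below uses descriptive names and is an assumed reading of the same dynamic programme, shown only to make the recurrence visible.

def count_ways(length: int = 50) -> int:
    # ways[n][c] = number of non-empty tilings of n squares using only tiles of length c + 2
    ways = [[0] * 3 for _ in range(length + 1)]
    for n in range(length + 1):
        for tile in (2, 3, 4):
            for start in range(n - tile + 1):
                # fix the leftmost tile at `start`; the n - start - tile squares to its
                # right may stay black (+1) or be tiled recursively (ways[...])
                ways[n][tile - 2] += ways[n - start - tile][tile - 2] + 1
    return sum(ways[length])

print(count_ways(5))   # 12, matching the worked example in the Project Euler 116 statement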
def UpperCamelCase_ ( __a ) -> str: a__ : int = 0 # if input_string is "aba" than new_input_string become "a|b|a" a__ : Union[str, Any] = "" a__ : List[str] = "" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(__a ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring a__, a__ : Optional[int] = 0, 0 # length[i] shows the length of palindromic substring with center i a__ : Optional[int] = [1 for i in range(len(__a ) )] # for each character in new_string find corresponding palindromic string a__ : str = 0 for j in range(len(__a ) ): a__ : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(__a ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 a__ : List[str] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: a__ : Optional[Any] = j - k + 1 # noqa: E741 a__ : Optional[int] = j + k - 1 # update max_length and start position if max_length < length[j]: a__ : str = length[j] a__ : Union[str, Any] = j # create that string a__ : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
37
class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] ): a__ : str = name a__ : Optional[int] = value a__ : Dict = weight def __repr__( self : Union[str, Any] ): return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def _UpperCamelCase( self : Dict ): return self.value def _UpperCamelCase( self : Optional[Any] ): return self.name def _UpperCamelCase( self : Optional[Any] ): return self.weight def _UpperCamelCase( self : Optional[int] ): return self.value / self.weight def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = [] for i in range(len(__a ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def UpperCamelCase_ ( __a , __a , __a ) -> Union[str, Any]: a__ : List[str] = sorted(__a , key=__a , reverse=__a ) a__ : List[Any] = [] a__, a__ : Union[str, Any] = 0.0, 0.0 for i in range(len(__a ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def UpperCamelCase_ ( ) -> Union[str, Any]: pass if __name__ == "__main__": import doctest doctest.testmod()
37
1
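The menu/knapsack sample above builds a list of items and greedily fills a cost budget ordered by a caller-supplied key; the compact sketch below illustrates that pattern with descriptive names (Thing, greedy, density), which are assumptions for illustration rather than the sample's own identifiers.

class Thing:
    def __init__(self, name: str, value: float, weight: float) -> None:
        self.name, self.value, self.weight = name, value, weight

    def density(self) -> float:
        return self.value / self.weight  # value gained per unit of cost

def greedy(items, max_cost, key):
    # take items best-first by `key`, skipping any that would exceed the budget
    chosen, total_cost, total_value = [], 0.0, 0.0
    for item in sorted(items, key=key, reverse=True):
        if total_cost + item.weight <= max_cost:
            chosen.append(item)
            total_cost += item.weight
            total_value += item.value
    return chosen, total_value

menu = [Thing("burger", 80, 40), Thing("pizza", 100, 60), Thing("salad", 30, 10)]
picked, value = greedy(menu, max_cost=100, key=Thing.density)
print([t.name for t in picked], value)  # ['salad', 'burger'] 110.0 (pizza no longer fits)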
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : """simple docstring""" def __init__( self : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : int=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : str=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Any=99 , lowerCamelCase__ : List[str]=24 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Dict=6 , lowerCamelCase__ : List[str]=37 , lowerCamelCase__ : Union[str, Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[str]=0.1 , lowerCamelCase__ : Dict=512 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : Any=0.02 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : str=1_000 , ): a__ : int = parent a__ : Optional[int] = batch_size a__ : Optional[Any] = seq_length a__ : Tuple = is_training a__ : Union[str, Any] = use_input_mask a__ : str = use_token_type_ids a__ : Union[str, Any] = use_labels a__ : Tuple = vocab_size a__ : Any = hidden_size a__ : Tuple = num_hidden_layers a__ : Union[str, Any] = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : Any = hidden_act a__ : Dict = hidden_dropout_prob a__ : Optional[Any] = attention_probs_dropout_prob a__ : Optional[int] = max_position_embeddings a__ : Any = type_vocab_size a__ : str = type_sequence_label_size a__ : Union[str, Any] = initializer_range a__ : Optional[Any] = num_labels a__ : List[Any] = scope a__ : List[Any] = range_bbox def _UpperCamelCase( self : str ): a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : str = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a__ : List[Any] = bbox[i, j, 3] a__ : int = bbox[i, j, 1] a__ : int = t if bbox[i, j, 2] < bbox[i, j, 0]: a__ : str = bbox[i, j, 2] a__ : str = bbox[i, j, 0] a__ : Dict = t a__ : List[Any] = None if self.use_input_mask: a__ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) a__ : List[str] = None if self.use_token_type_ids: a__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a__ : int = None a__ : Union[str, Any] = None if self.use_labels: a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a__ : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _UpperCamelCase( self : Union[str, Any] ): return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _UpperCamelCase( self : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Any , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , ): a__ : Optional[Any] = LiltModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Any = model(lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) a__ : Union[str, Any] = model(lowerCamelCase__ , bbox=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) a__ : Any = model(lowerCamelCase__ , bbox=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , ): a__ : Any = self.num_labels a__ : Dict = LiltForTokenClassification(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Any = model( lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : str , ): a__ : Optional[Any] = LiltForQuestionAnswering(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model( lowerCamelCase__ , bbox=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCamelCase( self : Dict ): a__ : Optional[int] = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ( a__ ), ) : List[str] = config_and_inputs a__ : Optional[Any] = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A__ ( A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) _lowercase = False _lowercase = False def _UpperCamelCase( self : str , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : 
Tuple ): return True def _UpperCamelCase( self : Dict ): a__ : Any = LiltModelTester(self ) a__ : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : Any ): self.config_tester.run_common_tests() def _UpperCamelCase( self : List[str] ): a__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : str ): a__ : List[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a__ : Union[str, Any] = type self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : Tuple ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Any ): a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Optional[Any] = LiltModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @require_torch @slow class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : int ): a__ : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(lowerCamelCase__ ) a__ : Union[str, Any] = torch.tensor([[1, 2]] , device=lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ ) a__ : Any = torch.Size([1, 2, 768] ) a__ : Any = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=lowerCamelCase__ , ) self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase__ , atol=1E-3 ) )
37
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ): super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ ) a__ : str = Sql( cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = None a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , ) # Build dataset for splits a__ : List[str] = self.builder.as_dataset( split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) a__ : Any = dataset a__ : str = name a__ : Tuple = con a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a__ : Any = num_proc a__ : Tuple = to_sql_kwargs def _UpperCamelCase( self : List[Any] ): a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ ) a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs ) return written def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): a__, a__, a__ : Union[str, Any] = args a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs a__ : Tuple = query_table( table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) a__ : str = batch.to_pandas() a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ ) return num_rows or len(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a__, a__ : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, 
index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
37
1
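The reader/writer pair in the sample above is the machinery behind the `datasets` library's SQL import/export helpers; a minimal round-trip sketch is shown below, assuming the public `Dataset.to_sql` / `Dataset.from_sql` entry points and a made-up table name.

import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})

con = sqlite3.connect("example.db")
ds.to_sql("my_table", con)  # writer path: batches are converted to pandas and appended
round_trip = Dataset.from_sql("SELECT text, label FROM my_table", con)  # reader path
print(round_trip[0])  # {'text': 'hello', 'label': 0}
con.close()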
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = AutoencoderKL _lowercase = 'sample' _lowercase = 1e-2 @property def _UpperCamelCase( self : str ): a__ : Any = 4 a__ : Union[str, Any] = 3 a__ : Dict = (32, 32) a__ : int = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase__ ) return {"sample": image} @property def _UpperCamelCase( self : List[Any] ): return (3, 32, 32) @property def _UpperCamelCase( self : Dict ): return (3, 32, 32) def _UpperCamelCase( self : List[str] ): a__ : str = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } a__ : List[str] = self.dummy_input return init_dict, inputs_dict def _UpperCamelCase( self : Tuple ): pass def _UpperCamelCase( self : Any ): pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def _UpperCamelCase( self : List[str] ): # enable deterministic behavior for gradient checkpointing a__, a__ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common() a__ : Dict = self.model_class(**lowerCamelCase__ ) model.to(lowerCamelCase__ ) assert not model.is_gradient_checkpointing and model.training a__ : str = model(**lowerCamelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() a__ : Union[str, Any] = torch.randn_like(lowerCamelCase__ ) a__ : Optional[int] = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing a__ : Any = self.model_class(**lowerCamelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowerCamelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training a__ : int = model_a(**lowerCamelCase__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() a__ : Any = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) a__ : str = dict(model.named_parameters() ) a__ : List[str] = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def _UpperCamelCase( self : Dict ): a__, a__ : Union[str, Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(lowerCamelCase__ ) a__ : Tuple = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _UpperCamelCase( self : Optional[int] ): a__ : Optional[int] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) a__ : List[str] = model.to(lowerCamelCase__ ) model.eval() if torch_device == "mps": a__ : int = torch.manual_seed(0 ) else: a__ : List[str] = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) a__ : List[str] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) a__ : Optional[Any] = image.to(lowerCamelCase__ ) with torch.no_grad(): a__ : str = model(lowerCamelCase__ , sample_posterior=lowerCamelCase__ , generator=lowerCamelCase__ ).sample a__ : Dict = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": a__ : Tuple = torch.tensor( [ -4.00_78E-01, -3.83_23E-04, -1.26_81E-01, -1.14_62E-01, 2.00_95E-01, 1.08_93E-01, -8.82_47E-02, -3.03_61E-01, -9.86_44E-03, ] ) elif torch_device == "cpu": a__ : Tuple = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: a__ : Union[str, Any] = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(lowerCamelCase__ , lowerCamelCase__ , rtol=1E-2 ) ) @slow class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] ): return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCamelCase__ ) for s in shape] )}.npy''' def _UpperCamelCase( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Dict=0 , lowerCamelCase__ : Optional[int]=(4, 3, 512, 512) , lowerCamelCase__ : Tuple=False ): a__ : Dict = torch.floataa if fpaa else torch.floataa a__ : str = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase__ , lowerCamelCase__ ) ) ).to(lowerCamelCase__ ).to(lowerCamelCase__ ) return image def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[Any]="CompVis/stable-diffusion-v1-4" , lowerCamelCase__ : Optional[Any]=False ): a__ : Tuple = "fp16" if fpaa else None a__ : Optional[Any] = torch.floataa if fpaa else torch.floataa a__ : Optional[int] = AutoencoderKL.from_pretrained( lowerCamelCase__ , subfolder="vae" , torch_dtype=lowerCamelCase__ , revision=lowerCamelCase__ , ) model.to(lowerCamelCase__ ).eval() return model def _UpperCamelCase( self : Any , 
lowerCamelCase__ : int=0 ): if torch_device == "mps": return torch.manual_seed(lowerCamelCase__ ) return torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ): a__ : List[str] = self.get_sd_vae_model() a__ : Dict = self.get_sd_image(lowerCamelCase__ ) a__ : List[str] = self.get_generator(lowerCamelCase__ ) with torch.no_grad(): a__ : str = model(lowerCamelCase__ , generator=lowerCamelCase__ , sample_posterior=lowerCamelCase__ ).sample assert sample.shape == image.shape a__ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() a__ : Dict = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] ): a__ : Union[str, Any] = self.get_sd_vae_model(fpaa=lowerCamelCase__ ) a__ : List[str] = self.get_sd_image(lowerCamelCase__ , fpaa=lowerCamelCase__ ) a__ : Tuple = self.get_generator(lowerCamelCase__ ) with torch.no_grad(): a__ : List[str] = model(lowerCamelCase__ , generator=lowerCamelCase__ , sample_posterior=lowerCamelCase__ ).sample assert sample.shape == image.shape a__ : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu() a__ : Dict = torch.tensor(lowerCamelCase__ ) assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ): a__ : List[Any] = self.get_sd_vae_model() a__ : str = self.get_sd_image(lowerCamelCase__ ) with torch.no_grad(): a__ : Optional[int] = model(lowerCamelCase__ ).sample assert sample.shape == image.shape a__ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() a__ : List[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ): a__ : Any = self.get_sd_vae_model() a__ : Optional[Any] = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 64, 64) ) with torch.no_grad(): a__ : Dict = model.decode(lowerCamelCase__ ).sample assert list(sample.shape ) == [3, 3, 
512, 512] a__ : Tuple = sample[-1, -2:, :2, -2:].flatten().cpu() a__ : int = torch.tensor(lowerCamelCase__ ) assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ): a__ : str = self.get_sd_vae_model(fpaa=lowerCamelCase__ ) a__ : Any = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase__ ) with torch.no_grad(): a__ : List[str] = model.decode(lowerCamelCase__ ).sample assert list(sample.shape ) == [3, 3, 512, 512] a__ : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu() a__ : Tuple = torch.tensor(lowerCamelCase__ ) assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _UpperCamelCase( self : str , lowerCamelCase__ : List[str] ): a__ : int = self.get_sd_vae_model(fpaa=lowerCamelCase__ ) a__ : Any = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase__ ) with torch.no_grad(): a__ : Union[str, Any] = model.decode(lowerCamelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): a__ : List[str] = model.decode(lowerCamelCase__ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def _UpperCamelCase( self : List[str] , lowerCamelCase__ : Optional[Any] ): a__ : Any = self.get_sd_vae_model() a__ : int = self.get_sd_image(lowerCamelCase__ , shape=(3, 4, 64, 64) ) with torch.no_grad(): a__ : Union[str, Any] = model.decode(lowerCamelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): a__ : str = model.decode(lowerCamelCase__ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int] ): a__ : Union[str, Any] = self.get_sd_vae_model() a__ : Optional[Any] = self.get_sd_image(lowerCamelCase__ ) a__ : Any = self.get_generator(lowerCamelCase__ ) with torch.no_grad(): a__ : Optional[Any] = model.encode(lowerCamelCase__ ).latent_dist a__ : Any = dist.sample(generator=lowerCamelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] a__ : Any = sample[0, -1, -3:, -3:].flatten().cpu() a__ : Dict = torch.tensor(lowerCamelCase__ ) a__ : Optional[int] = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , atol=lowerCamelCase__ )
37
import math
from datetime import datetime, timedelta


def gauss_easter(year) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
37
1
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCamelCase : int = logging.get_logger("""transformers.models.speecht5""") def UpperCamelCase_ ( __a , __a , __a ) -> Tuple: hf_model.apply_weight_norm() a__ : Dict = checkpoint["input_conv.weight_g"] a__ : Optional[Any] = checkpoint["input_conv.weight_v"] a__ : Union[str, Any] = checkpoint["input_conv.bias"] for i in range(len(config.upsample_rates ) ): a__ : List[str] = checkpoint[f'''upsamples.{i}.1.weight_g'''] a__ : int = checkpoint[f'''upsamples.{i}.1.weight_v'''] a__ : List[str] = checkpoint[f'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): a__ : Any = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g'''] a__ : Dict = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v'''] a__ : Tuple = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias'''] a__ : int = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g'''] a__ : Any = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v'''] a__ : Optional[int] = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias'''] a__ : Any = checkpoint["output_conv.1.weight_g"] a__ : Optional[Any] = checkpoint["output_conv.1.weight_v"] a__ : Tuple = checkpoint["output_conv.1.bias"] hf_model.remove_weight_norm() @torch.no_grad() def UpperCamelCase_ ( __a , __a , __a , __a=None , __a=None , ) -> List[str]: if config_path is not None: a__ : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(__a ) else: a__ : List[Any] = SpeechTaHifiGanConfig() a__ : List[Any] = SpeechTaHifiGan(__a ) a__ : Tuple = torch.load(__a ) load_weights(orig_checkpoint["model"]["generator"] , __a , __a ) a__ : int = np.load(__a ) a__ : Union[str, Any] = stats[0].reshape(-1 ) a__ : Optional[Any] = stats[1].reshape(-1 ) a__ : Optional[int] = torch.from_numpy(__a ).float() a__ : Tuple = torch.from_numpy(__a ).float() model.save_pretrained(__a ) if repo_id: print("Pushing to the hub..." ) model.push_to_hub(__a ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) UpperCamelCase : Optional[int] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
37
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ): super().__init__() a__ : int = module a__ : Any = nn.Sequential( nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , ) a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ): return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'bigscience/bloom-1b7' # Constant values _lowercase = 2.1_09_65_95_52_69_25_74 _lowercase = 'Hello my name is' _lowercase = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _lowercase = 1_0 def _UpperCamelCase( self : Dict ): # Models and tokenizer a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): super().setUp() # Models and tokenizer a__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : List[Any] ): a__ : str = self.model_abit.config self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) ) a__ : Optional[Any] = config.to_dict() a__ : int = config.to_diff_dict() a__ : List[str] = config.to_json_string() def _UpperCamelCase( self : int ): from bitsandbytes.nn import Paramsabit a__ : List[Any] = self.model_fpaa.get_memory_footprint() a__ : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) a__ : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCamelCase( self : Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCamelCase( self : str ): a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = BitsAndBytesConfig() a__ : Optional[int] = True a__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" ) a__ : str = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : int = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : Dict ): with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = BitsAndBytesConfig() with self.assertRaises(lowerCamelCase__ ): a__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _UpperCamelCase( self : int ): with self.assertRaises(lowerCamelCase__ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : int = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Any = self.model_fpaa.to(torch.floataa ) a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to("cpu" ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.half() # Check this does not throw an error a__ : Dict = self.model_fpaa.float() def _UpperCamelCase( self : Dict ): a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCamelCase( cls : str ): a__ : Dict = "t5-small" a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense a__ : int = AutoTokenizer.from_pretrained(cls.model_name ) a__ : str = "Translate in German: Hello, my dog is cute" def _UpperCamelCase( self : Optional[int] ): gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Optional[int] ): from transformers import TaForConditionalGeneration a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Optional[Any] = None # test with `t5-small` a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Any = model.generate(**lowerCamelCase__ ) a__ : Union[str, Any] = modules def _UpperCamelCase( self : List[Any] ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Optional[int] = model.generate(**lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : List[str] ): super().setUp() # model_name a__ : Union[str, Any] = "bigscience/bloom-560m" a__ : Union[str, Any] = "t5-small" # Different types of model a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Sequence classification model a__ : Dict = AutoModelForSequenceClassification.from_pretrained( 
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # CausalLM model a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Seq2seq model a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): super().setUp() def _UpperCamelCase( self : int ): del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Tuple ): super().setUp() def _UpperCamelCase( self : List[Any] ): a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): a__ : Any = "facebook/opt-350m" super().setUp() def _UpperCamelCase( self : int ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability a__ : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCamelCase__ ) ): a__ : Dict = LoRALayer(module.q_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.k_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : Optional[Any] = model.forward(**lowerCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt2-xl' _lowercase = 3.31_91_85_48_54_15_21_87
37
1
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def UpperCamelCase_ ( __a ) -> List[str]: a__ : int = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: a__ : List[Any] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: a__ : str = 4 a__ : Optional[int] = 48 a__ : int = "pixelshuffle_aux" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: a__ : int = [6, 6, 6, 6] a__ : List[str] = 60 a__ : Union[str, Any] = [6, 6, 6, 6] a__ : List[Any] = "pixelshuffledirect" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: a__ : Optional[Any] = 4 a__ : Dict = "nearest+conv" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: a__ : int = 1 a__ : Tuple = 1 a__ : Optional[Any] = 126 a__ : List[Any] = 7 a__ : List[str] = 255.0 a__ : Union[str, Any] = "" return config def UpperCamelCase_ ( __a , __a ) -> List[Any]: if "patch_embed.proj" in name and "layers" not in name: a__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: a__ : Union[str, Any] = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" ) if "layers" in name: a__ : Dict = name.replace("layers" , "encoder.stages" ) if "residual_group.blocks" in name: a__ : str = name.replace("residual_group.blocks" , "layers" ) if "attn.proj" in name: a__ : List[Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: a__ : List[Any] = name.replace("attn" , "attention.self" ) if "norm1" in name: a__ : List[str] = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: a__ : Optional[Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: a__ : str = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: a__ : Optional[int] = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: a__ : List[str] = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: a__ : Tuple = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: a__ : Any = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: a__ : Tuple = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if "patch_embed.proj" in name: a__ : int = name.replace("patch_embed.proj" , "patch_embed.projection" ) if name == "norm.weight": a__ : Optional[Any] = "layernorm.weight" if name == "norm.bias": a__ : str = "layernorm.bias" if "conv_first" in name: a__ : Dict = name.replace("conv_first" , "first_convolution" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: a__ : Optional[Any] = name.replace("conv_last" , "final_convolution" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: a__ : Optional[int] = name.replace("conv_before_upsample.0" , "conv_before_upsample" ) if "upsample.0" in name: a__ : Any = name.replace("upsample.0" , "upsample.convolution_0" ) if "upsample.2" in name: a__ : Optional[Any] = name.replace("upsample.2" , "upsample.convolution_1" ) a__ : Tuple = "upsample." + name elif config.upsampler == "pixelshuffledirect": a__ : List[Any] = name.replace("upsample.0.weight" , "upsample.conv.weight" ) a__ : str = name.replace("upsample.0.bias" , "upsample.conv.bias" ) else: pass else: a__ : Dict = "swin2sr." 
+ name return name def UpperCamelCase_ ( __a , __a ) -> Any: for key in orig_state_dict.copy().keys(): a__ : Optional[int] = orig_state_dict.pop(__a ) if "qkv" in key: a__ : Tuple = key.split("." ) a__ : Tuple = int(key_split[1] ) a__ : List[Any] = int(key_split[4] ) a__ : Union[str, Any] = config.embed_dim if "weight" in key: a__ : Tuple = val[:dim, :] a__ : List[Any] = val[dim : dim * 2, :] a__ : Dict = val[-dim:, :] else: a__ : Any = val[:dim] a__ : Union[str, Any] = val[dim : dim * 2] a__ : List[Any] = val[-dim:] pass else: a__ : Optional[Any] = val return orig_state_dict def UpperCamelCase_ ( __a , __a , __a ) -> str: a__ : Optional[int] = get_config(__a ) a__ : Optional[Any] = SwinaSRForImageSuperResolution(__a ) model.eval() a__ : Any = torch.hub.load_state_dict_from_url(__a , map_location="cpu" ) a__ : int = convert_state_dict(__a , __a ) a__, a__ : int = model.load_state_dict(__a , strict=__a ) if len(__a ) > 0: raise ValueError("Missing keys when converting: {}".format(__a ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'''Unexpected key {key} in state_dict''' ) # verify values a__ : str = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true" a__ : str = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) a__ : List[str] = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values a__ : Dict = 126 if "Jpeg" in checkpoint_url else 256 a__ : List[str] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) a__ : List[str] = transforms(__a ).unsqueeze(0 ) if config.num_channels == 1: a__ : List[str] = pixel_values[:, 0, :, :].unsqueeze(1 ) a__ : List[Any] = model(__a ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: a__ : Dict = torch.Size([1, 3, 512, 512] ) a__ : int = torch.tensor( [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: a__ : str = torch.Size([1, 3, 1_024, 1_024] ) a__ : Dict = torch.tensor( [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here a__ : Optional[int] = torch.Size([1, 3, 1_024, 1_024] ) a__ : Optional[Any] = torch.tensor( [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: a__ : int = torch.Size([1, 3, 512, 512] ) a__ : Any = torch.tensor( [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: a__ : str = torch.Size([1, 3, 1_024, 1_024] ) a__ : Optional[Any] = torch.tensor( [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}''' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __a , atol=1e-3 ) print("Looks ok!" 
) a__ : Union[str, Any] = { "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": ( "swin2SR-classical-sr-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": ( "swin2SR-classical-sr-x4-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": ( "swin2SR-compressed-sr-x4-48" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": ( "swin2SR-lightweight-x2-64" ), "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": ( "swin2SR-realworld-sr-x4-64-bsrgan-psnr" ), } a__ : Union[str, Any] = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__a ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__a ) if push_to_hub: model.push_to_hub(f'''caidas/{model_name}''' ) processor.push_to_hub(f'''caidas/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""", type=str, help="""URL of the original Swin2SR checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""") UpperCamelCase : Any = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
37
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
37
1
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 UpperCamelCase : Union[str, Any] = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") UpperCamelCase : List[Any] = get_tests_dir("""fixtures/vocab.json""") UpperCamelCase : Dict = get_tests_dir("""fixtures""") class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] def _UpperCamelCase( self : Any ): a__ : Union[str, Any] = 0 def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : List[str] ): with tempfile.TemporaryDirectory() as tmpdirname: a__ : Optional[Any] = WavaVecaConfig() a__ : Optional[int] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" ) # save in new folder model_config.save_pretrained(lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) a__ : int = AutoProcessor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : int ): with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(lowerCamelCase__ , os.path.join(lowerCamelCase__ , lowerCamelCase__ ) ) copyfile(lowerCamelCase__ , os.path.join(lowerCamelCase__ , "vocab.json" ) ) a__ : List[str] = AutoProcessor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : str ): with tempfile.TemporaryDirectory() as tmpdirname: a__ : List[Any] = WavaVecaFeatureExtractor() a__ : Optional[int] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" ) a__ : Dict = WavaVecaProcessor(lowerCamelCase__ , lowerCamelCase__ ) # save in new folder processor.save_pretrained(lowerCamelCase__ ) # drop `processor_class` in tokenizer with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "r" ) as f: a__ : Optional[Any] = json.load(lowerCamelCase__ ) config_dict.pop("processor_class" ) with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as f: f.write(json.dumps(lowerCamelCase__ ) ) a__ : Union[str, Any] = AutoProcessor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): with tempfile.TemporaryDirectory() as tmpdirname: a__ : Any = WavaVecaFeatureExtractor() a__ : List[str] = 
AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" ) a__ : Any = WavaVecaProcessor(lowerCamelCase__ , lowerCamelCase__ ) # save in new folder processor.save_pretrained(lowerCamelCase__ ) # drop `processor_class` in feature extractor with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "r" ) as f: a__ : List[str] = json.load(lowerCamelCase__ ) config_dict.pop("processor_class" ) with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as f: f.write(json.dumps(lowerCamelCase__ ) ) a__ : Tuple = AutoProcessor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Tuple ): with tempfile.TemporaryDirectory() as tmpdirname: a__ : Union[str, Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" ) model_config.save_pretrained(lowerCamelCase__ ) # copy relevant files copyfile(lowerCamelCase__ , os.path.join(lowerCamelCase__ , "vocab.json" ) ) # create emtpy sample processor with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as f: f.write("{}" ) a__ : int = AutoProcessor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Tuple ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCamelCase__ ): a__ : Tuple = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase__ ): a__ : int = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ ) a__ : str = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , "NewProcessor" ) a__ : List[str] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" ) a__ : List[Any] = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" ) # Test we can also load the slow version a__ : Optional[int] = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ , use_fast=lowerCamelCase__ ) a__ : Optional[int] = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" ) else: self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" ) def _UpperCamelCase( self : List[Any] ): try: AutoConfig.register("custom" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ ) AutoProcessor.register(lowerCamelCase__ , lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): AutoProcessor.register(lowerCamelCase__ , lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API a__ : Tuple = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: a__ : List[Any] = os.path.join(lowerCamelCase__ , "vocab.txt" ) with open(lowerCamelCase__ , "w" 
, encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) a__ : Any = CustomTokenizer(lowerCamelCase__ ) a__ : Dict = CustomProcessor(lowerCamelCase__ , lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(lowerCamelCase__ ) a__ : Union[str, Any] = AutoProcessor.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _UpperCamelCase( self : Union[str, Any] ): class A__ ( A__ ): """simple docstring""" _lowercase = False class A__ ( A__ ): """simple docstring""" _lowercase = False class A__ ( A__ ): """simple docstring""" _lowercase = 'AutoFeatureExtractor' _lowercase = 'AutoTokenizer' _lowercase = False try: AutoConfig.register("custom" , lowerCamelCase__ ) AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ ) AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ ) AutoProcessor.register(lowerCamelCase__ , lowerCamelCase__ ) # If remote code is not set, the default is to use local classes. a__ : int = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" ) self.assertEqual(processor.__class__.__name__ , "NewProcessor" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. a__ : List[Any] = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(processor.__class__.__name__ , "NewProcessor" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
a__ : List[Any] = AutoProcessor.from_pretrained( "hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ ) self.assertEqual(processor.__class__.__name__ , "NewProcessor" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _UpperCamelCase( self : Union[str, Any] ): a__ : str = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" ) def _UpperCamelCase( self : List[str] ): a__ : str = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" ) self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" ) @is_staging_test class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def _UpperCamelCase( cls : Optional[Any] ): a__ : Union[str, Any] = TOKEN HfFolder.save_token(lowerCamelCase__ ) @classmethod def _UpperCamelCase( cls : Tuple ): try: delete_repo(token=cls._token , repo_id="test-processor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-processor" ) except HTTPError: pass def _UpperCamelCase( self : Optional[int] ): a__ : Optional[Any] = WavaVecaProcessor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(lowerCamelCase__ , "test-processor" ) , push_to_hub=lowerCamelCase__ , use_auth_token=self._token ) a__ : List[str] = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(lowerCamelCase__ , getattr(new_processor.feature_extractor , lowerCamelCase__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def _UpperCamelCase( self : Dict ): a__ : Union[str, Any] = WavaVecaProcessor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(lowerCamelCase__ , "test-processor-org" ) , push_to_hub=lowerCamelCase__ , use_auth_token=self._token , organization="valid_org" , ) a__ : Any = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(lowerCamelCase__ , getattr(new_processor.feature_extractor , lowerCamelCase__ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def _UpperCamelCase( self : Optional[int] ): CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() a__ : List[Any] = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: a__ : str = os.path.join(lowerCamelCase__ , "vocab.txt" ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer: 
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) ) a__ : Any = CustomTokenizer(lowerCamelCase__ ) a__ : List[Any] = CustomProcessor(lowerCamelCase__ , lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token ) a__ : Optional[Any] = Repository(lowerCamelCase__ , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token ) processor.save_pretrained(lowerCamelCase__ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor", "AutoProcessor": "custom_processing.CustomProcessor", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) ) as f: a__ : Dict = json.load(lowerCamelCase__ ) self.assertDictEqual( tokenizer_config["auto_map"] , { "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None], "AutoProcessor": "custom_processing.CustomProcessor", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , "custom_feature_extraction.py" ) ) ) self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , "custom_tokenization.py" ) ) ) self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , "custom_processing.py" ) ) ) repo.push_to_hub() a__ : List[str] = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=lowerCamelCase__ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
37
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Tuple = R"\w+[.]\d+" a__ : List[Any] = re.findall(__a , __a ) for pat in pats: a__ : Union[str, Any] = key.replace(__a , "_".join(pat.split("." ) ) ) return key def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : List[str] = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): a__ : Any = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: a__ : Optional[Any] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: a__ : Union[str, Any] = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer a__ : List[str] = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: a__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer a__ : Tuple = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": a__ : Tuple = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight a__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias a__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def UpperCamelCase_ ( __a , __a , __a=42 ) -> str: # Step 1: Convert pytorch tensor to numpy a__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params a__ : Tuple = flax_model.init_weights(PRNGKey(__a ) ) a__ : Optional[Any] = flatten_dict(__a ) a__ : Union[str, Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): a__ : Optional[int] = rename_key(__a ) a__ : Optional[int] = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters a__, a__ : Union[str, Any] = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown a__ : str = jnp.asarray(__a ) return unflatten_dict(__a )
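A small self-contained illustration (with a made-up parameter name) of what the regex-based key renaming above does: every "name.<digit>" segment becomes "name_<digit>" so the key lines up with Flax's nested parameter layout.
import re

def demo_rename(key: str) -> str:
    # Same pattern as above: find "word.<digits>" chunks and join them with "_"
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

print(demo_rename("down_blocks.0.resnets.1.conv1.weight"))
# -> down_blocks_0.resnets_1.conv1.weight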
37
1
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase : Optional[int] = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCamelCase_ ( __a ) -> Any: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCamelCase_ ( __a , __a , __a ) -> Any: return max(metric_fn(__a , __a ) for gt in ground_truths ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [] if args.gold_data_mode == "qa": a__ : Any = pd.read_csv(__a , sep="\t" , header=__a ) for answer_list in data[1]: a__ : Union[str, Any] = ast.literal_eval(__a ) answers.append(__a ) else: a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()] a__ : List[str] = [[reference] for reference in references] a__ : List[str] = 0 for prediction, ground_truths in zip(__a , __a ): total += 1 em += metric_max_over_ground_truths(__a , __a , __a ) fa += metric_max_over_ground_truths(__a , __a , __a ) a__ : Dict = 100.0 * em / total a__ : Optional[Any] = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = args.k a__ : str = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = 0 for hypo, reference in zip(__a , __a ): a__ : Any = set(hypo.split("\t" )[:k] ) a__ : Union[str, Any] = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a__ : Union[str, Any] = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: def strip_title(__a ): if title.startswith("\"" ): a__ : Optional[Any] = title[1:] if title.endswith("\"" ): a__ : Union[str, Any] = title[:-1] return title a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device ) a__ : Optional[int] = rag_model.rag.question_encoder(__a ) a__ : Union[str, Any] = question_enc_outputs[0] a__ : Optional[int] = rag_model.retriever( __a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a__ : int = [] for docs in all_docs: a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]] provenance_strings.append("\t".join(__a ) ) return provenance_strings def UpperCamelCase_ ( __a , __a , __a ) -> Dict: with torch.no_grad(): a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a ) a__ : Any = inputs_dict.input_ids.to(args.device ) a__ : Dict = inputs_dict.attention_mask.to(args.device ) a__ : Optional[int] = rag_model.generate( # 
rag_model overwrites generate __a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a ) if args.print_predictions: for q, a in zip(__a , __a ): logger.info("Q: {} - A: {}".format(__a , __a ) ) return answers def UpperCamelCase_ ( ) -> List[str]: a__ : int = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=__a , type=__a , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) a__ : int = parser.parse_args() a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Tuple = {} if args.model_type is None: a__ : List[str] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a__ : Tuple = args.n_docs if args.index_name is not None: a__ : Any = args.index_name if args.index_path is not None: a__ : int = args.index_path else: a__ : Optional[Any] = BartForConditionalGeneration a__ : Tuple = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , __a ) a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(__a , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(__a ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a__ : str = RagRetriever.from_pretrained(__a , **__a ) a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a ) model.retriever.init_retrieval() else: a__ : Dict = model_class.from_pretrained(__a , **__a ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a__ : List[Any] = [] for line in tqdm(__a ): questions.append(line.strip() ) if len(__a ) == args.eval_batch_size: a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) + "\n" ) preds_file.flush() a__ : Any = [] if len(__a ) > 0: a__ : List[str] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) ) preds_file.flush() score_fn(__a , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase : List[Any] = get_args() main(args)
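A toy, self-contained illustration (titles invented) of the precision@k counting done in get_precision_at_k above: hypothesis and reference provenance strings are tab-separated titles, and the overlap within the top-k hypotheses is credited.
k = 2
hypo = "Title A\tTitle B\tTitle C"
reference = "Title B\tTitle D"
hypo_provenance = set(hypo.split("\t")[:k])       # {"Title A", "Title B"}
ref_provenance = set(reference.split("\t"))       # {"Title B", "Title D"}
print(len(hypo_provenance & ref_provenance) / k)  # 0.5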
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def UpperCamelCase_ ( ) -> int: a__ : Any = HfArgumentParser(__a ) a__ : Any = parser.parse_args_into_dataclasses()[0] a__ : Optional[int] = TensorFlowBenchmark(args=__a ) try: a__ : Optional[int] = parser.parse_args_into_dataclasses()[0] except ValueError as e: a__ : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead." a__ : List[Any] = " ".join(str(__a ).split(" " )[:-1] ) a__ : str = "" a__ : List[Any] = eval(str(__a ).split(" " )[-1] ) a__ : List[str] = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__a ) if len(__a ) > 0: a__ : Tuple = full_error_msg + begin_error_msg + str(__a ) raise ValueError(__a ) benchmark.run() if __name__ == "__main__": main()
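For illustration, this is roughly the message the fallback above assembles when a removed underscore-style flag is passed; the flag name here is only an example.
arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
print(arg_error_msg.format("cuda"))
# -> Arg --no_cuda is no longer used, please use --no-cuda instead.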
37
1
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class A__ : """simple docstring""" def __init__( self : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : int=None , lowerCamelCase__ : int="resnet50" , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : str=32 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : str=True , lowerCamelCase__ : Union[str, Any]=True , ): a__ : Tuple = parent a__ : Any = out_indices if out_indices is not None else [4] a__ : str = stage_names a__ : Dict = out_features a__ : Dict = backbone a__ : Optional[Any] = batch_size a__ : int = image_size a__ : List[Any] = num_channels a__ : List[Any] = use_pretrained_backbone a__ : List[Any] = is_training def _UpperCamelCase( self : Optional[Any] ): a__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Tuple = self.get_config() return config, pixel_values def _UpperCamelCase( self : Optional[Any] ): return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any ): a__ : Tuple = TimmBackbone(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def _UpperCamelCase( self : int ): a__ : int = self.prepare_config_and_inputs() a__, a__ : List[str] = config_and_inputs a__ : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class A__ ( A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = (TimmBackbone,) if is_torch_available() else () _lowercase = {'feature-extraction': TimmBackbone} if is_torch_available() else {} _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Tuple ): a__ : str = TimmBackboneModelTester(self ) a__ : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def _UpperCamelCase( self : List[str] ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[Any] = "resnet18" a__ : List[Any] = "microsoft/resnet-18" a__ : str = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ ) a__ : List[str] = 
AutoBackbone.from_pretrained(lowerCamelCase__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) a__ : List[Any] = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ , out_indices=[1, 2, 3] ) a__ : int = AutoBackbone.from_pretrained(lowerCamelCase__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("TimmBackbone doesn't support feed forward chunking" ) def _UpperCamelCase( self : Any ): pass @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" ) def _UpperCamelCase( self : int ): pass @unittest.skip("TimmBackbone initialization is managed on the timm side" ) def _UpperCamelCase( self : Union[str, Any] ): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def _UpperCamelCase( self : int ): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def _UpperCamelCase( self : Optional[int] ): pass @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" ) def _UpperCamelCase( self : Optional[int] ): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def _UpperCamelCase( self : int ): pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def _UpperCamelCase( self : Optional[Any] ): pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def _UpperCamelCase( self : List[Any] ): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def _UpperCamelCase( self : Optional[int] ): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def _UpperCamelCase( self : int ): pass @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." ) def _UpperCamelCase( self : List[str] ): pass @unittest.skip("TimmBackbone doesn't support output_attentions." ) def _UpperCamelCase( self : Tuple ): pass @unittest.skip("Safetensors is not supported by timm." ) def _UpperCamelCase( self : Union[str, Any] ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _UpperCamelCase( self : Any ): pass def _UpperCamelCase( self : Union[str, Any] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Tuple = model_class(lowerCamelCase__ ) a__ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : List[Any] = [*signature.parameters.keys()] a__ : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Dict ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() a__ : List[Any] = True a__ : List[str] = self.has_attentions # no need to test all models as different heads yield the same functionality a__ : Union[str, Any] = self.all_model_classes[0] a__ : Dict = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) a__ : int = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = model(**lowerCamelCase__ ) a__ : int = outputs[0][-1] # Encoder-/Decoder-only models a__ : List[str] = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: a__ : str = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=lowerCamelCase__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Tuple = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(**lowerCamelCase__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None a__ : Tuple = copy.deepcopy(lowerCamelCase__ ) a__ : Any = None a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[int] = model(**lowerCamelCase__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights a__ : Any = copy.deepcopy(lowerCamelCase__ ) a__ : List[str] = False a__ : Union[str, Any] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[int] = model(**lowerCamelCase__ )
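A minimal usage sketch of the two loading paths these backbone tests compare; the checkpoint names mirror the ones used in the test, and the calls download weights, so treat this as illustrative rather than part of the test suite.
from transformers import AutoBackbone

# timm path: checkpoint name is a timm model id
timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
# transformers path: checkpoint name is a Hub model id
hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])

# Both expose the same backbone interface checked above.
print(timm_backbone.out_indices, hf_backbone.out_indices)
print(timm_backbone.channels == hf_backbone.channels)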
37
1
def UpperCamelCase_ ( __a , __a = False ) -> bool: if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3_317_044_064_679_887_385_961_981 and not allow_probable: raise ValueError( "Warning: upper bound of deterministic test is exceeded. " "Pass allow_probable=True to allow probabilistic test. " "A return value of True indicates a probable prime." ) # array bounds provided by analysis a__ : Dict = [ 2_047, 1_373_653, 25_326_001, 3_215_031_751, 2_152_302_898_747, 3_474_749_660_383, 341_550_071_728_321, 1, 3_825_123_056_546_413_051, 1, 1, 318_665_857_834_031_151_167_461, 3_317_044_064_679_887_385_961_981, ] a__ : Dict = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(__a , 1 ): if n < _p: # then we have our last prime to check a__ : Any = primes[:idx] break a__, a__ : List[str] = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: a__ : List[str] = False for r in range(__a ): a__ : List[str] = pow(__a , d * 2**r , __a ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): a__ : Union[str, Any] = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def UpperCamelCase_ ( ) -> None: assert not miller_rabin(561 ) assert miller_rabin(563 ) # 2047 assert not miller_rabin(838_201 ) assert miller_rabin(838_207 ) # 1_373_653 assert not miller_rabin(17_316_001 ) assert miller_rabin(17_316_017 ) # 25_326_001 assert not miller_rabin(3_078_386_641 ) assert miller_rabin(3_078_386_653 ) # 3_215_031_751 assert not miller_rabin(1_713_045_574_801 ) assert miller_rabin(1_713_045_574_819 ) # 2_152_302_898_747 assert not miller_rabin(2_779_799_728_307 ) assert miller_rabin(2_779_799_728_327 ) # 3_474_749_660_383 assert not miller_rabin(113_850_023_909_441 ) assert miller_rabin(113_850_023_909_527 ) # 341_550_071_728_321 assert not miller_rabin(1_275_041_018_848_804_351 ) assert miller_rabin(1_275_041_018_848_804_391 ) # 3_825_123_056_546_413_051 assert not miller_rabin(79_666_464_458_507_787_791_867 ) assert miller_rabin(79_666_464_458_507_787_791_951 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(552_840_677_446_647_897_660_333 ) assert miller_rabin(552_840_677_446_647_897_660_359 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
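A worked illustration of the decomposition used by the deterministic test above, plus the Carmichael-number check from the assertions (miller_rabin is the name the assertions refer to).
n = 561                      # 3 * 11 * 17, a Carmichael number
d, s = n - 1, 0
while d % 2 == 0:            # write n - 1 as d * 2**s with d odd
    d //= 2
    s += 1
print(d, s, d * 2 ** s == n - 1)             # 35 4 True
print(miller_rabin(561), miller_rabin(563))  # False True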
37
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
37
1
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed UpperCamelCase : Tuple = """true""" def UpperCamelCase_ ( __a , __a=82 , __a=16 ) -> List[Any]: set_seed(42 ) a__ : Any = RegressionModel() a__ : Optional[Any] = deepcopy(__a ) a__ : Dict = RegressionDataset(length=__a ) a__ : List[str] = DataLoader(__a , batch_size=__a ) model.to(accelerator.device ) a__, a__ : int = accelerator.prepare(__a , __a ) return model, ddp_model, dataloader def UpperCamelCase_ ( __a , __a=False ) -> str: a__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) a__ : str = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(__a ): a__ : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__a , max_length=__a ) return outputs with accelerator.main_process_first(): a__ : int = dataset.map( __a , batched=__a , remove_columns=["idx", "sentence1", "sentence2"] , ) a__ : Optional[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__a ): if use_longest: return tokenizer.pad(__a , padding="longest" , return_tensors="pt" ) return tokenizer.pad(__a , padding="max_length" , max_length=128 , return_tensors="pt" ) return DataLoader(__a , shuffle=__a , collate_fn=__a , batch_size=16 ) def UpperCamelCase_ ( __a , __a ) -> List[Any]: a__ : List[Any] = Accelerator(dispatch_batches=__a , split_batches=__a ) a__ : Any = get_dataloader(__a , not dispatch_batches ) a__ : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=__a ) a__, a__ : Tuple = accelerator.prepare(__a , __a ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def UpperCamelCase_ ( __a , __a , __a ) -> str: a__ : List[str] = [] for batch in dataloader: a__, a__ : Tuple = batch.values() with torch.no_grad(): a__ : Any = model(__a ) a__, a__ : Union[str, Any] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) a__, a__ : int = [], [] for logit, targ in logits_and_targets: logits.append(__a ) targs.append(__a ) a__, a__ : List[str] = torch.cat(__a ), torch.cat(__a ) return logits, targs def UpperCamelCase_ ( __a , __a=82 , __a=False , __a=False , __a=16 ) -> Optional[int]: a__, a__, a__ : Optional[Any] = get_basic_setup(__a , __a , __a ) a__, a__ : int = generate_predictions(__a , __a , __a ) assert ( len(__a ) == num_samples ), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__a )}''' def UpperCamelCase_ ( __a = False , __a = False ) -> Optional[Any]: a__ : Any = evaluate.load("glue" , "mrpc" ) a__, a__ : List[Any] = get_mrpc_setup(__a , __a ) # First do baseline a__, a__, a__ : Any = setup["no"] model.to(__a ) model.eval() for batch in dataloader: batch.to(__a ) with torch.inference_mode(): a__ : str = model(**__a ) a__ : List[str] = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=__a , references=batch["labels"] ) a__ : Optional[Any] = metric.compute() # Then do distributed a__, a__, a__ : Any = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): a__ : Optional[Any] 
= model(**__a ) a__ : Any = outputs.logits.argmax(dim=-1 ) a__ : str = batch["labels"] a__, a__ : Optional[int] = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=__a , references=__a ) a__ : int = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def UpperCamelCase_ ( ) -> Dict: a__ : int = Accelerator(split_batches=__a , dispatch_batches=__a ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(__a , __a ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: a__ : Dict = Accelerator(split_batches=__a , dispatch_batches=__a ) if accelerator.is_local_main_process: print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(__a , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) a__ : Optional[Any] = Accelerator() test_torch_metrics(__a , 512 ) accelerator.state._reset_state() def UpperCamelCase_ ( __a ) -> Dict: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
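A toy single-process sketch of the gather_for_metrics pattern the test above exercises; the tiny linear model and random dataset are stand-ins, and the point is that samples duplicated to pad the last uneven batch are dropped before metrics are computed.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
dataset = TensorDataset(torch.randn(10, 4), torch.randint(0, 2, (10,)))
dataloader = DataLoader(dataset, batch_size=4)      # 10 samples -> batches of 4, 4, 2
model, dataloader = accelerator.prepare(model, dataloader)

all_preds, all_refs = [], []
for inputs, labels in dataloader:
    with torch.inference_mode():
        preds = model(inputs).argmax(dim=-1)
    # gather_for_metrics trims any samples added to even out the last batch
    preds, labels = accelerator.gather_for_metrics((preds, labels))
    all_preds.append(preds)
    all_refs.append(labels)
print(torch.cat(all_preds).shape, torch.cat(all_refs).shape)  # both torch.Size([10])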
37
import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase : Union[str, Any] = None def UpperCamelCase_ ( ) -> List[str]: a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Dict = bool(qa["answers"]["text"] ) return qid_to_has_ans def UpperCamelCase_ ( __a ) -> List[Any]: def remove_articles(__a ): return ARTICLES_REGEX.sub(" " , __a ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): a__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) ) def UpperCamelCase_ ( __a ) -> Dict: if not s: return [] return normalize_answer(__a ).split() def UpperCamelCase_ ( __a , __a ) -> str: return int(normalize_answer(__a ) == normalize_answer(__a ) ) def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : int = get_tokens(__a ) a__ : Optional[Any] = get_tokens(__a ) a__ : Any = collections.Counter(__a ) & collections.Counter(__a ) a__ : Dict = sum(common.values() ) if len(__a ) == 0 or len(__a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : Tuple = 1.0 * num_same / len(__a ) a__ : str = 1.0 * num_same / len(__a ) a__ : str = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase_ ( __a , __a ) -> int: a__ : List[str] = {} a__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : List[Any] = qa["id"] a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : Tuple = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue a__ : Tuple = preds[qid] # Take max over all gold answers a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers ) a__ : str = max(compute_fa(__a , __a ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: a__ : Optional[Any] = {} for qid, s in scores.items(): a__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: a__ : Dict = float(not qid_to_has_ans[qid] ) else: a__ : Optional[Any] = s return new_scores def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple: if not 
qid_list: a__ : Union[str, Any] = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: a__ : int = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: for k in new_eval: a__ : Optional[Any] = new_eval[k] def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: plt.step(__a , __a , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__a ) plt.savefig(__a ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict: a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) a__ : Any = 0.0 a__ : Optional[int] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Tuple = [0.0] a__ : List[str] = 0.0 for i, qid in enumerate(__a ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Any = true_pos / float(i + 1 ) a__ : int = true_pos / float(__a ) if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__a ) recalls.append(__a ) if out_image: plot_pr_curve(__a , __a , __a , __a ) return {"ap": 100.0 * avg_prec} def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str: if out_image_dir and not os.path.exists(__a ): os.makedirs(__a ) a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)" , ) merge_eval(__a , __a , "pr_exact" ) merge_eval(__a , __a , "pr_f1" ) merge_eval(__a , __a , "pr_oracle" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: if not qid_list: return a__ : Optional[Any] = [na_probs[k] for k in qid_list] a__ : str = np.ones_like(__a ) / float(len(__a ) ) plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]: a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : Optional[Any] = num_no_ans a__ : Dict = cur_score a__ : Any = 0.0 a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) for i, qid in enumerate(__a ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : Optional[int] = scores[qid] else: if preds[qid]: a__ : str = -1 else: a__ : Union[str, Any] = 0 cur_score += diff if cur_score > best_score: a__ : Any = cur_score a__ : Dict = na_probs[qid] return 100.0 * best_score / len(__a ), best_thresh def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any: a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__ : Any = best_exact a__ : Any = exact_thresh a__ : List[Any] = best_fa a__ : Optional[int] = fa_thresh def UpperCamelCase_ ( ) -> Tuple: with open(OPTS.data_file ) as f: a__ : List[Any] = json.load(__a ) a__ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: a__ : int = json.load(__a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : List[str] = json.load(__a ) else: a__ : Optional[int] = {k: 0.0 for k in preds} a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v] a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] a__, a__ : str = get_raw_scores(__a , __a ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : Tuple = make_eval_dict(__a , __a ) if has_ans_qids: a__ : str = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "HasAns" ) if no_ans_qids: a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__a , __a , __a , __a , __a , __a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__a , __a ) else: print(json.dumps(__a , indent=2 ) ) if __name__ == "__main__": UpperCamelCase : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
37
1
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
37
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CLIPTokenizer _lowercase = CLIPTokenizerFast _lowercase = True _lowercase = {} _lowercase = False def _UpperCamelCase( self : List[Any] ): super().setUp() # fmt: off a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a__ : Optional[Any] = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : int = "lower newer" a__ : Optional[int] = "lower newer" return input_text, output_text def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = "lower newer" a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = tokens + [tokenizer.unk_token] a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) @require_ftfy def _UpperCamelCase( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y" a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of space type a__ : str = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of line break type a__ : Union[str, Any] = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}''' a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) a__ : Optional[Any] = f''' {text}''' a__ : str = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) def _UpperCamelCase( self : int ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(lowerCamelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def _UpperCamelCase( self : int ): super().test_tokenization_python_rust_equals() def _UpperCamelCase( self : str ): # CLIP always lower cases letters pass
37
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : Union[str, Any] = { """configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = [ """TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TimesformerModel""", """TimesformerForVideoClassification""", """TimesformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timesformer import ( TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimesformerForVideoClassification, TimesformerModel, TimesformerPreTrainedModel, ) else: import sys UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
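# The `_LazyModule` assignment above keeps importing this package cheap: the torch-heavy
# submodules are only imported when one of their attributes is first accessed. Below is a
# rough, generic sketch of that deferred-import idea (illustrative names only; this is not
# how `_LazyModule` is implemented internally).
import importlib


def lazy_attribute(module_name: str, attribute: str):
    """Return a resolver that imports `module_name` only when it is actually called."""

    def _resolve():
        module = importlib.import_module(module_name)
        return getattr(module, attribute)

    return _resolve


# Example: json is only imported when the resolver is invoked.
json_dumps = lazy_attribute("json", "dumps")
print(json_dumps()({"lazy": True}))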
37
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase : List[Any] = """======= >>>>>>> """ UpperCamelCase : Optional[Any] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase : Any = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def UpperCamelCase_ ( __a ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class A__ ( A__ ): """simple docstring""" @staticmethod def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ): a__ : List[str] = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ): a__ : str = get_logger("datasets-cli/converting" ) a__ : Optional[Any] = tfds_path a__ : Optional[int] = datasets_directory def _UpperCamelCase( self : int ): if os.path.isdir(self._tfds_path ): a__ : List[str] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): a__ : Any = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) a__ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) a__ : Tuple = [] a__ : str = [] a__ : List[Any] = {} if os.path.isdir(self._tfds_path ): a__ : List[str] = os.listdir(lowerCamelCase__ ) else: a__ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(lowerCamelCase__ , encoding="utf-8" ) as f: a__ : List[Any] = f.readlines() a__ : Union[str, Any] = [] a__ : Union[str, Any] = False a__ : Union[str, Any] = False a__ : Dict = [] for line in lines: a__ : Optional[Any] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: a__ : List[Any] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here a__ : List[str] = "" continue elif "from absl import logging" in out_line: a__ : Dict = "from datasets import logging\n" elif "getLogger" in out_line: a__ : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): a__ : List[str] = True a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) a__ : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: a__ : Optional[int] = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset a__ : Dict = f_name.replace(".py" , "" ) a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: a__ : Any = os.path.basename(lowerCamelCase__ ) a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ , lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
37
1
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def UpperCamelCase_ ( __a , __a , __a , __a ) -> Union[str, Any]: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def UpperCamelCase_ ( __a , __a , __a , __a , __a=True ) -> str: model.train() a__ : str = model(__a ) a__ : Dict = F.mse_loss(__a , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(__a ) def UpperCamelCase_ ( __a , __a=False ) -> str: set_seed(42 ) a__ : int = RegressionModel() a__ : Optional[int] = deepcopy(__a ) a__ : List[str] = RegressionDataset(length=80 ) a__ : Tuple = DataLoader(__a , batch_size=16 ) model.to(accelerator.device ) if sched: a__ : str = AdamW(params=model.parameters() , lr=1e-3 ) a__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1e-3 ) a__ : List[str] = LambdaLR(__a , lr_lambda=lambda __a : epoch**0.65 ) a__ : Optional[Any] = LambdaLR(__a , lr_lambda=lambda __a : epoch**0.65 ) # Make a copy of `model` if sched: a__, a__, a__, a__ : Optional[Any] = accelerator.prepare(__a , __a , __a , __a ) else: a__, a__ : Optional[Any] = accelerator.prepare(__a , __a ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def UpperCamelCase_ ( __a ) -> Tuple: # Test when on a single CPU or GPU that the context manager does nothing a__, a__, a__ : Dict = get_training_setup(__a ) # Use a single batch a__, a__ : Optional[Any] = next(iter(__a ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a__, a__ : str = accelerator.gather((ddp_input, ddp_target) ) a__, a__ : Tuple = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__a , __a , __a , __a ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__a ): step_model(__a , __a , __a , __a ) else: # Sync grads step_model(__a , __a , __a , __a ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(__a , __a , __a , __a ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) a__ : Any = ddp_input[torch.randperm(len(__a ) )] def UpperCamelCase_ ( __a ) -> Optional[Any]: # Test on 
distributed setup that context manager behaves properly a__, a__, a__ : List[str] = get_training_setup(__a ) # Use a single batch a__, a__ : Optional[int] = next(iter(__a ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model a__, a__ : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) ) a__, a__ : Tuple = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__a , __a , __a , __a ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__a ): step_model(__a , __a , __a , __a ) else: # Sync grads step_model(__a , __a , __a , __a ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) a__ : Dict = ddp_input[torch.randperm(len(__a ) )] def UpperCamelCase_ ( __a=False , __a=False ) -> Optional[Any]: a__ : List[str] = Accelerator( split_batches=__a , dispatch_batches=__a , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a__, a__, a__ : Optional[int] = get_training_setup(__a ) for iteration, batch in enumerate(__a ): a__, a__ : Optional[Any] = batch.values() # Gather the distributed inputs and targs for the base model a__, a__ : List[Any] = accelerator.gather((ddp_input, ddp_target) ) a__, a__ : Optional[int] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__a , __a , __a , __a , __a ) # Do "gradient accumulation" (noop) with accelerator.accumulate(__a ): step_model(__a , __a , __a , __a ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(__a ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) a__ : Union[str, Any] = ddp_input[torch.randperm(len(__a ) )] GradientState._reset_state() def UpperCamelCase_ ( __a=False , __a=False ) -> int: a__ : List[Any] = Accelerator( split_batches=__a , dispatch_batches=__a , gradient_accumulation_steps=2 ) # Test that context manager behaves properly a__, a__, a__, a__, a__, a__, a__ : Any = get_training_setup(__a , __a ) for iteration, batch in enumerate(__a ): a__, a__ : Tuple = batch.values() # Gather the distributed inputs and targs for the base model a__, a__ : List[str] = 
accelerator.gather((ddp_input, ddp_target) ) a__, a__ : int = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(__a , __a , __a , __a , __a ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__a )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(__a ): step_model(__a , __a , __a , __a ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' a__ : Union[str, Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__a )) if accelerator.num_processes > 1: check_model_parameters(__a , __a , __a , __a ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def UpperCamelCase_ ( ) -> List[str]: a__ : Dict = Accelerator() a__ : Dict = RegressionDataset(length=80 ) a__ : Optional[Any] = DataLoader(__a , batch_size=16 ) a__ : Optional[int] = RegressionDataset(length=96 ) a__ : Dict = DataLoader(__a , batch_size=16 ) a__, a__ : Optional[int] = accelerator.prepare(__a , __a ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(__a ): assert id(accelerator.gradient_state.active_dataloader ) == id(__a ) if iteration < len(__a ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(__a ): assert id(accelerator.gradient_state.active_dataloader ) == id(__a ) if batch_num < len(__a ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def UpperCamelCase_ ( ) -> List[Any]: a__ : Optional[Any] = Accelerator() a__ : Optional[Any] = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(__a ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(__a ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(__a , __a ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch 
and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(__a , __a ) def UpperCamelCase_ ( __a ) -> List[str]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
37
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """simple docstring"""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
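# A minimal usage sketch following the "gzip://file.txt::http://foo.bar/file.txt.gz"
# example in the `protocol` comment above. It assumes the gzip filesystem defined here has
# been registered with fsspec under the "gzip" protocol (registration is not shown in this
# file), so the class name below is a placeholder.
#
# import fsspec
# fsspec.register_implementation("gzip", GzipCompressionFileSystem)  # placeholder name
# with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", "rb") as f:
#     data = f.read()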
37
1
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = DiTPipeline _lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _lowercase = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } _lowercase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _lowercase = False def _UpperCamelCase( self : Union[str, Any] ): torch.manual_seed(0 ) a__ : Optional[Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCamelCase__ , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=lowerCamelCase__ , ) a__ : List[str] = AutoencoderKL() a__ : str = DDIMScheduler() a__ : str = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str]=0 ): if str(lowerCamelCase__ ).startswith("mps" ): a__ : Any = torch.manual_seed(lowerCamelCase__ ) else: a__ : Optional[int] = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) a__ : int = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def _UpperCamelCase( self : List[str] ): a__ : int = "cpu" a__ : Optional[int] = self.get_dummy_components() a__ : List[Any] = self.pipeline_class(**lowerCamelCase__ ) pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : str = self.get_dummy_inputs(lowerCamelCase__ ) a__ : Optional[int] = pipe(**lowerCamelCase__ ).images a__ : Optional[Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) a__ : Dict = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) a__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase__ , 1E-3 ) def _UpperCamelCase( self : str ): self._test_inference_batch_single_identical(relax_max_difference=lowerCamelCase__ , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _UpperCamelCase( self : Tuple ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Optional[int] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : int ): a__ : int = torch.manual_seed(0 ) a__ : Any = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) a__ : Any = ["vase", "umbrella", "white shark", "white wolf"] a__ : Tuple = pipe.get_label_ids(lowerCamelCase__ ) a__ : int = pipe(lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=40 , output_type="np" ).images 
for word, image in zip(lowerCamelCase__ , lowerCamelCase__ ): a__ : str = load_numpy( f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-2 def _UpperCamelCase( self : Dict ): a__ : List[Any] = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) a__ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) a__ : List[str] = ["vase", "umbrella"] a__ : List[str] = pipe.get_label_ids(lowerCamelCase__ ) a__ : Optional[Any] = torch.manual_seed(0 ) a__ : Union[str, Any] = pipe(lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=25 , output_type="np" ).images for word, image in zip(lowerCamelCase__ , lowerCamelCase__ ): a__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1E-1
37
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") a__ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__a ): os.makedirs(__a ) a__ : Any = model.state_dict() def to_tf_var_name(__a ): for patt, repl in iter(__a ): a__ : Tuple = name.replace(__a , __a ) return f'''bert/{name}''' def create_tf_var(__a , __a , __a ): a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype ) a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: a__ : int = to_tf_var_name(__a ) a__ : Union[str, Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): a__ : int = torch_tensor.T a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a ) tf.keras.backend.set_value(__a , __a ) a__ : int = session.run(__a ) print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' ) a__ : Any = tf.train.Saver(tf.trainable_variables() ) saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) ) def UpperCamelCase_ ( __a=None ) -> int: a__ : Dict = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" ) a__ : Optional[Any] = parser.parse_args(__a ) a__ : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
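# A hedged command-line example for the conversion script above. The script filename and
# local paths are placeholders; only the argument names come from the argparse definitions
# in this file.
#
#   python convert_bert_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_checkpoint
#
# --cache_dir is optional and points at a directory containing the PyTorch model.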
37
1
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : str = { """vocab_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""", }, """merges_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""", }, """tokenizer_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""", }, } UpperCamelCase : Optional[int] = { """gpt2""": 1024, """gpt2-medium""": 1024, """gpt2-large""": 1024, """gpt2-xl""": 1024, """distilgpt2""": 1024, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = GPTaTokenizer def __init__( self : Optional[Any] , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Any="<|endoftext|>" , lowerCamelCase__ : Dict="<|endoftext|>" , lowerCamelCase__ : Tuple="<|endoftext|>" , lowerCamelCase__ : str=False , **lowerCamelCase__ : Any , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : Dict = kwargs.pop("add_bos_token" , lowerCamelCase__ ) a__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Optional[int] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : List[str] = add_prefix_space a__ : Union[str, Any] = pre_tok_class(**lowerCamelCase__ ) a__ : int = add_prefix_space def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): a__ : str = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate 
{self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : Optional[Any] ): a__ : Any = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Any , lowerCamelCase__ : "Conversation" ): a__ : Tuple = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) + [self.eos_token_id] ) if len(lowerCamelCase__ ) > self.model_max_length: a__ : Optional[int] = input_ids[-self.model_max_length :] return input_ids
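# A small, hedged illustration of the `add_prefix_space` option handled in the constructor
# above. It needs the "gpt2" tokenizer files to be downloadable, so it is left as a sketch
# here; the expected outputs reflect my understanding and are not asserted by this file.
#
# from transformers import GPT2TokenizerFast
# default_tok = GPT2TokenizerFast.from_pretrained("gpt2")
# prefix_tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
# default_tok.tokenize("Hello")  # ["Hello"]
# prefix_tok.tokenize("Hello")   # ["ĠHello"] -- tokenized as if preceded by a space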
37
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Dict = patch_size a__ : List[Any] = max_length a__ : str = num_mel_bins a__ : Optional[Any] = is_training a__ : Optional[int] = use_labels a__ : List[Any] = hidden_size a__ : str = num_hidden_layers a__ : Any = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Tuple = attention_probs_dropout_prob a__ : List[Any] = type_sequence_label_size a__ : Any = initializer_range a__ : str = scope a__ : List[str] = frequency_stride a__ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 a__ : Tuple = frequency_out_dimension * time_out_dimension a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : List[str] ): a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a__ : List[Any] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = self.get_config() return config, input_values, labels def _UpperCamelCase( self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , 
lowerCamelCase__ : Optional[int] ): a__ : List[Any] = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : str ): a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ) : Optional[int] = config_and_inputs a__ : List[Any] = {"input_values": input_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowercase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _UpperCamelCase( self : str ): a__ : str = ASTModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : Optional[int] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : Tuple ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Optional[Any] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : Optional[int] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a__, a__ : List[str] = torchaudio.load(__a ) return audio, sampling_rate @require_torch @require_torchaudio class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : List[str] ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _UpperCamelCase( self : Optional[int] ): a__ : int = self.default_feature_extractor a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" 
).to(lowerCamelCase__ ) a__ : Any = self.default_feature_extractor a__, a__ : Dict = prepare_audio() a__ : str = audio.squeeze().numpy() a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
37
1
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
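# Quick usage check for the two functions above: for 2 bits the string sequence is
# ["00", "01", "11", "10"], i.e. [0, 1, 3, 2] as integers, and consecutive entries
# differ in exactly one bit.
if __name__ == "__main__":
    print(gray_code_sequence_string(2))  # ['00', '01', '11', '10']
    print(gray_code(2))  # [0, 1, 3, 2]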
37
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
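# Added usage sketch (hedged; it only mirrors the expectations asserted in the
# slow tests above, no new behaviour is claimed):
# from transformers import XGLMTokenizer
# tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# tokenizer.encode("Hello World!")  # -> [2, 31227, 4447, 35], as in the test above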
37
1
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class A__ :
    """simple docstring"""

    pass
37
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
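# Added usage sketch (hedged; the script filename is an assumption, the flags come
# from the argparse definition above):
# python convert_instructblip_original_to_pytorch.py \
#     --model_name instructblip-flan-t5-xl \
#     --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
#     --push_to_hub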
37
1
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    # greedily take items in decreasing value/weight order; the last item taken may be fractional
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
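# Added usage sketch (hedged; assumes the frac_knapsack signature restored above):
# with values [60, 100, 120], weights [10, 20, 30] and capacity 50, the greedy
# fractional optimum takes the first two items whole plus 2/3 of the third = 240.0.
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0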
37
def binomial_coefficient(n: int, r: int) -> int:
    # single-row Pascal's-triangle DP: c[j] holds C(i, j) after processing row i
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
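# Added cross-check (hedged; assumes the binomial_coefficient name restored above):
# the row DP should match Python's closed-form math.comb.
from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252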
37
1
import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline UpperCamelCase : List[str] = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False) parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""") parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""") UpperCamelCase : Any = parser.parse_args() UpperCamelCase : Tuple = """cpu""" UpperCamelCase : int = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings""" UpperCamelCase : List[Any] = """path-to-your-trained-model""" UpperCamelCase : Dict = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: UpperCamelCase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) UpperCamelCase : Dict = pipe.to(device) # to channels last UpperCamelCase : Optional[int] = pipe.unet.to(memory_format=torch.channels_last) UpperCamelCase : Union[str, Any] = pipe.vae.to(memory_format=torch.channels_last) UpperCamelCase : str = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: UpperCamelCase : str = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex UpperCamelCase : str = torch.randn(2, 4, 64, 64) UpperCamelCase : Union[str, Any] = torch.rand(1) * 999 UpperCamelCase : Any = torch.randn(2, 77, 768) UpperCamelCase : Tuple = (sample, timestep, encoder_hidden_status) try: UpperCamelCase : Union[str, Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: UpperCamelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) UpperCamelCase : str = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) UpperCamelCase : Tuple = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: UpperCamelCase : Any = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute UpperCamelCase : Optional[int] = 666 UpperCamelCase : Any = torch.Generator(device).manual_seed(seed) UpperCamelCase : int = {"""generator""": generator} if args.steps is not None: UpperCamelCase : Tuple = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): UpperCamelCase : str = pipe(prompt, **generate_kwargs).images[0] # save image image.save("""generated.png""")
37
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
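# Added usage sketch (hedged; the checkpoint name comes from the pretrained map
# above, the class is the fast LED tokenizer defined above):
# tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
# enc = tok("a long document ...", return_tensors="pt")
# The overridden _pad keeps any `global_attention_mask` aligned with the padded
# input_ids, filling with -1 (local attention) rather than 0.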
37
1
def exchange_sort(numbers: list[int]) -> list[int]:
    # repeatedly exchange out-of-order pairs until the list is sorted in place
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
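# Added usage sketch (hedged; assumes the exchange_sort name restored above):
assert exchange_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert exchange_sort([]) == []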
37
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
37
1
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCamelCase : str = logging.get_logger(__name__) @add_end_docstrings(A__ ) class A__ ( A__ ): """simple docstring""" def __init__( self : Optional[Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : int ): super().__init__(*lowerCamelCase__ , **lowerCamelCase__ ) requires_backends(self , "vision" ) self.check_model_type(lowerCamelCase__ ) def __call__( self : List[Any] , lowerCamelCase__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase__ : Optional[int] ): return super().__call__(lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : List[str] , **lowerCamelCase__ : int ): return {}, {}, {} def _UpperCamelCase( self : int , lowerCamelCase__ : Any ): a__ : str = load_image(lowerCamelCase__ ) a__ : int = image.size a__ : Optional[Any] = self.image_processor(images=lowerCamelCase__ , return_tensors=self.framework ) return model_inputs def _UpperCamelCase( self : str , lowerCamelCase__ : int ): a__ : List[Any] = self.model(**lowerCamelCase__ ) return model_outputs def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int ): a__ : Tuple = model_outputs.predicted_depth a__ : Tuple = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=lowerCamelCase__ ) a__ : Optional[Any] = prediction.squeeze().cpu().numpy() a__ : Tuple = (output * 255 / np.max(lowerCamelCase__ )).astype("uint8" ) a__ : List[Any] = Image.fromarray(lowerCamelCase__ ) a__ : Optional[Any] = {} a__ : List[str] = predicted_depth a__ : Tuple = depth return output_dict
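# Added usage sketch (hedged; the checkpoint name is an assumption, not taken
# from the pipeline code above):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# result = depth_estimator("path/to/image.jpg")
# result["depth"]            # PIL image rescaled to 0-255, as in postprocess above
# result["predicted_depth"]  # raw torch tensor of predicted depths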
37
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    # min-max rescaling to the [0, 1] range
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    # z-score scaling to zero mean and unit (sample) standard deviation
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
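# Added usage sketch (hedged; assumes the function names restored above):
assert normalization([2.0, 4.0, 6.0]) == [0.0, 0.5, 1.0]
assert standardization([2.0, 4.0, 6.0]) == [-1.0, 0.0, 1.0]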
37
1
import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope="session" ) def UpperCamelCase_ ( ) -> List[Any]: a__ : Optional[Any] = 10 a__ : Dict = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string" ) ), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ), "answers": datasets.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), "id": datasets.Value("int64" ), } ) a__ : Dict = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(__a ) ), } , features=__a , ) return dataset @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a ) -> List[Any]: a__ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "file.arrow" ) dataset.map(cache_file_name=__a ) return filename # FILE_CONTENT + files UpperCamelCase : Tuple = """\ Text data. Second line of data.""" @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> int: a__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "file.txt" a__ : List[Any] = FILE_CONTENT with open(__a , "w" ) as f: f.write(__a ) return filename @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> List[str]: import bza a__ : str = tmp_path_factory.mktemp("data" ) / "file.txt.bz2" a__ : Optional[int] = bytes(__a , "utf-8" ) with bza.open(__a , "wb" ) as f: f.write(__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Tuple: import gzip a__ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" ) a__ : List[Any] = bytes(__a , "utf-8" ) with gzip.open(__a , "wb" ) as f: f.write(__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Dict: if datasets.config.LZ4_AVAILABLE: import lza.frame a__ : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.lz4" a__ : Tuple = bytes(__a , "utf-8" ) with lza.frame.open(__a , "wb" ) as f: f.write(__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a ) -> Tuple: if datasets.config.PY7ZR_AVAILABLE: import pyazr a__ : Dict = tmp_path_factory.mktemp("data" ) / "file.txt.7z" with pyazr.SevenZipFile(__a , "w" ) as archive: archive.write(__a , arcname=os.path.basename(__a ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a ) -> Tuple: import tarfile a__ : List[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.tar" with tarfile.TarFile(__a , "w" ) as f: f.add(__a , arcname=os.path.basename(__a ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> str: import lzma a__ : Dict = tmp_path_factory.mktemp("data" ) / "file.txt.xz" a__ : Any = bytes(__a , "utf-8" ) with lzma.open(__a , "wb" ) as f: f.write(__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a ) -> Tuple: import zipfile a__ : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.txt.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.basename(__a ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> int: if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd a__ : int = tmp_path_factory.mktemp("data" ) / "file.txt.zst" a__ : str = bytes(__a , "utf-8" ) with zstd.open(__a , "wb" ) as f: f.write(__a ) return path 
@pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> str: a__ : Any = tmp_path_factory.mktemp("data" ) / "file.xml" a__ : Optional[int] = textwrap.dedent( "\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" ) with open(__a , "w" ) as f: f.write(__a ) return filename UpperCamelCase : Tuple = [ {"""col_1""": """0""", """col_2""": 0, """col_3""": 0.0}, {"""col_1""": """1""", """col_2""": 1, """col_3""": 1.0}, {"""col_1""": """2""", """col_2""": 2, """col_3""": 2.0}, {"""col_1""": """3""", """col_2""": 3, """col_3""": 3.0}, ] UpperCamelCase : Any = [ {"""col_1""": """4""", """col_2""": 4, """col_3""": 4.0}, {"""col_1""": """5""", """col_2""": 5, """col_3""": 5.0}, ] UpperCamelCase : str = { """col_1""": ["""0""", """1""", """2""", """3"""], """col_2""": [0, 1, 2, 3], """col_3""": [0.0, 1.0, 2.0, 3.0], } UpperCamelCase : str = [ {"""col_3""": 0.0, """col_1""": """0""", """col_2""": 0}, {"""col_3""": 1.0, """col_1""": """1""", """col_2""": 1}, ] UpperCamelCase : List[str] = [ {"""col_1""": """s0""", """col_2""": 0, """col_3""": 0.0}, {"""col_1""": """s1""", """col_2""": 1, """col_3""": 1.0}, {"""col_1""": """s2""", """col_2""": 2, """col_3""": 2.0}, {"""col_1""": """s3""", """col_2""": 3, """col_3""": 3.0}, ] @pytest.fixture(scope="session" ) def UpperCamelCase_ ( ) -> Optional[Any]: return DATA_DICT_OF_LISTS @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Dict = datasets.Dataset.from_dict(__a ) a__ : Union[str, Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" ) dataset.map(cache_file_name=__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> List[str]: a__ : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" ) with contextlib.closing(sqlitea.connect(__a ) ) as con: a__ : int = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" ) for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> int: a__ : str = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" ) with open(__a , "w" , newline="" ) as f: a__ : Optional[int] = csv.DictWriter(__a , fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" ) with open(__a , "w" , newline="" ) as f: a__ : Optional[Any] = csv.DictWriter(__a , fieldnames=["col_1", "col_2", "col_3"] ) writer.writeheader() for item in DATA: writer.writerow(__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: import bza a__ : Optional[Any] = 
tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2" with open(__a , "rb" ) as f: a__ : Union[str, Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(__a , "wb" ) as f: f.write(__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a ) -> Union[str, Any]: a__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.basename(__a ) ) f.write(__a , arcname=os.path.basename(__a ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a ) -> Dict: a__ : Any = tmp_path_factory.mktemp("data" ) / "dataset.csv.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) ) f.write(__a , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.join("main_dir" , os.path.basename(__a ) ) ) f.write(__a , arcname=os.path.join("main_dir" , os.path.basename(__a ) ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" ) a__ : Dict = pa.schema( { "col_1": pa.string(), "col_2": pa.intaa(), "col_3": pa.floataa(), } ) with open(__a , "wb" ) as f: a__ : List[Any] = pq.ParquetWriter(__a , schema=__a ) a__ : str = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__a ) )] for k in DATA[0]} , schema=__a ) writer.write_table(__a ) writer.close() return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Tuple: a__ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) a__ : str = {"data": DATA} with open(__a , "w" ) as f: json.dump(__a , __a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Tuple: a__ : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset.json" ) a__ : Optional[Any] = {"data": DATA_DICT_OF_LISTS} with open(__a , "w" ) as f: json.dump(__a , __a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Any: a__ : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" ) with open(__a , "w" ) as f: for item in DATA: f.write(json.dumps(__a ) + "\n" ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> List[Any]: a__ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" ) with open(__a , "w" ) as f: for item in DATA: f.write(json.dumps(__a ) + "\n" ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Tuple: a__ : Any = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" ) with open(__a , "w" ) as f: for item in DATA_312: f.write(json.dumps(__a ) + "\n" ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> int: a__ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" ) with open(__a , "w" ) as f: for item in DATA_STR: f.write(json.dumps(__a ) + "\n" ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a ) -> Tuple: import gzip a__ : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" ) with open(__a , "rb" ) as orig_file: with gzip.open(__a , "wb" ) as zipped_file: zipped_file.writelines(__a ) return path @pytest.fixture(scope="session" ) def 
UpperCamelCase_ ( __a , __a ) -> Tuple: import gzip a__ : Dict = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" ) with open(__a , "rb" ) as orig_file: with gzip.open(__a , "wb" ) as zipped_file: zipped_file.writelines(__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a ) -> Any: a__ : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.basename(__a ) ) f.write(__a , arcname=os.path.basename(__a ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: a__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.join("nested" , os.path.basename(__a ) ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a ) -> Dict: a__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.join("main_dir" , os.path.basename(__a ) ) ) f.write(__a , arcname=os.path.join("main_dir" , os.path.basename(__a ) ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a ) -> Union[str, Any]: a__ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar" with tarfile.TarFile(__a , "w" ) as f: f.add(__a , arcname=os.path.basename(__a ) ) f.add(__a , arcname=os.path.basename(__a ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> Tuple: a__ : Optional[Any] = tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar" with tarfile.TarFile(__a , "w" ) as f: f.add(__a , arcname=os.path.join("nested" , os.path.basename(__a ) ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : List[Any] = ["0", "1", "2", "3"] a__ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" ) with open(__a , "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Tuple = ["0", "1", "2", "3"] a__ : str = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" ) with open(__a , "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> List[Any]: a__ : Any = ["0", "1", "2", "3"] a__ : int = tmp_path_factory.mktemp("data" ) / "dataset.abc" with open(__a , "w" ) as f: for item in data: f.write(item + "\n" ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Tuple = tmp_path_factory.mktemp("data" ) / "dataset.text.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.basename(__a ) ) f.write(__a , arcname=os.path.basename(__a ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.join("main_dir" , os.path.basename(__a ) ) ) f.write(__a , arcname=os.path.join("main_dir" , os.path.basename(__a ) ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[int]: a__ : List[Any] = tmp_path_factory.mktemp("data" ) / "dataset.ext.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.basename("unsupported.ext" 
) ) f.write(__a , arcname=os.path.basename("unsupported_2.ext" ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> str: a__ : Dict = "\n".join(["First", "Second\u2029with Unicode new line", "Third"] ) a__ : str = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( ) -> List[Any]: return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" ) @pytest.fixture(scope="session" ) def UpperCamelCase_ ( ) -> List[str]: return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" ) @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a , __a ) -> Tuple: a__ : int = tmp_path_factory.mktemp("data" ) / "dataset.img.zip" with zipfile.ZipFile(__a , "w" ) as f: f.write(__a , arcname=os.path.basename(__a ) ) f.write(__a , arcname=os.path.basename(__a ).replace(".jpg" , "2.jpg" ) ) return path @pytest.fixture(scope="session" ) def UpperCamelCase_ ( __a ) -> Any: a__ : Optional[int] = tmp_path_factory.mktemp("data_dir" ) (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt" , "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / "subdir" / "test.txt" , "w" ) as f: f.write("bar\n" * 10 ) # hidden file with open(data_dir / "subdir" / ".test.txt" , "w" ) as f: f.write("bar\n" * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt" , "w" ) as f: f.write("foo\n" * 10 ) with open(data_dir / ".subdir" / "test.txt" , "w" ) as f: f.write("bar\n" * 10 ) return data_dir
37
def solution(length: int = 50) -> int:
    # different_colour_ways_number[row_length][tile_length - 2] counts the ways to
    # tile a row of row_length using coloured tiles of one fixed length (2, 3 or 4)
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"""{solution() = }""")
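# Added cross-check (hedged; assumes the solution name restored above): Project
# Euler 116 gives exactly 12 colour-tile arrangements for a row of five units,
# which this dynamic programme reproduces.
assert solution(5) == 12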
37
1
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: UpperCamelCase : Dict = None UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : List[Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : List[str] = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : Optional[Any] = { """facebook/nllb-large-en-ro""": 1024, """facebook/nllb-200-distilled-600M""": 1024, } # fmt: off UpperCamelCase : int = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", 
"""slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = ['input_ids', 'attention_mask'] _lowercase = NllbTokenizer _lowercase = [] _lowercase = [] def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Tuple="<s>" , lowerCamelCase__ : Optional[Any]="</s>" , lowerCamelCase__ : str="</s>" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="<unk>" , lowerCamelCase__ : Optional[Any]="<pad>" , lowerCamelCase__ : Union[str, Any]="<mask>" , lowerCamelCase__ : Any=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Optional[int]=False , **lowerCamelCase__ : List[Any] , ): # Mask token behave like a normal word, i.e. include the space before it a__ : Dict = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token a__ : Optional[int] = legacy_behaviour super().__init__( vocab_file=lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , legacy_behaviour=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : Union[str, Any] = vocab_file a__ : Optional[Any] = False if not self.vocab_file else True a__ : Any = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) a__ : Dict = { lang_code: self.convert_tokens_to_ids(lowerCamelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } a__ : List[Any] = src_lang if src_lang is not None else "eng_Latn" a__ : Tuple = self.convert_tokens_to_ids(self._src_lang ) a__ : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _UpperCamelCase( self : str ): return self._src_lang @src_lang.setter def _UpperCamelCase( self : Any , lowerCamelCase__ : str ): a__ : Any = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Union[str, Any] = [self.sep_token_id] a__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] , lowerCamelCase__ : Optional[str] , **lowerCamelCase__ : List[Any] ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) a__ : Tuple = src_lang a__ : List[str] = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) a__ : Optional[int] = self.convert_tokens_to_ids(lowerCamelCase__ ) a__ : Optional[int] = tgt_lang_id return inputs def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : str = "eng_Latn" , lowerCamelCase__ : Optional[List[str]] = None , lowerCamelCase__ : str = "fra_Latn" , **lowerCamelCase__ : Dict , ): a__ : Optional[Any] = src_lang a__ : Any = tgt_lang return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): return self.set_src_lang_special_tokens(self.src_lang ) def _UpperCamelCase( self : str ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _UpperCamelCase( self : str , lowerCamelCase__ : Tuple ): a__ : Union[str, Any] = self.convert_tokens_to_ids(lowerCamelCase__ ) if self.legacy_behaviour: a__ : List[Any] = [] a__ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] else: a__ : List[str] = [self.cur_lang_code] a__ : Any = [self.eos_token_id] a__ : List[str] = self.convert_ids_to_tokens(self.prefix_tokens ) a__ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) a__ : Any = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : str ): a__ : List[Any] = self.convert_tokens_to_ids(lowerCamelCase__ ) if self.legacy_behaviour: a__ : Optional[Any] = [] a__ : List[str] = [self.eos_token_id, self.cur_lang_code] 
else: a__ : Optional[int] = [self.cur_lang_code] a__ : List[Any] = [self.eos_token_id] a__ : Any = self.convert_ids_to_tokens(self.prefix_tokens ) a__ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) a__ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCamelCase__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return a__ : int = os.path.join( lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ): copyfile(self.vocab_file , lowerCamelCase__ ) return (out_vocab_file,)
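A minimal usage sketch for the fast NLLB tokenizer defined above (published in transformers as NllbTokenizerFast); the sample sentence is illustrative, and the checkpoint name and language codes are the ones already referenced in the file.

from transformers import AutoTokenizer

# Assumes the NLLB checkpoint referenced above is reachable on the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello, how are you?", return_tensors="pt")
# The source-language code and </s> are attached as special tokens
# (before or after the text depending on `legacy_behaviour`).
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]))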
37
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    # build a list of Things from parallel name/value/weight lists
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # greedily take items in descending key_func order while they fit in the weight budget
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
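A quick usage sketch of the greedy helper above; the menu entries and the 100-unit weight budget are made-up illustration values, and the helper names follow the reconstruction above.

# Illustrative data only: pick high-value items first until the weight budget is spent.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]
menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 100, Things.get_value)
print(chosen, total_value)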
37
1
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
37
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class A__ ( A__ ): """simple docstring""" def __init__( self : Dict , lowerCamelCase__ : Union[str, "sqlalchemy.sql.Selectable"] , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Optional[int] , ): super().__init__(features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , **lowerCamelCase__ ) a__ : str = Sql( cache_dir=lowerCamelCase__ , features=lowerCamelCase__ , sql=lowerCamelCase__ , con=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCamelCase( self : Tuple ): a__ : Optional[Any] = None a__ : Dict = None a__ : Union[str, Any] = None a__ : Union[str, Any] = None self.builder.download_and_prepare( download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , ) # Build dataset for splits a__ : List[str] = self.builder.as_dataset( split="train" , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory ) return dataset class A__ : """simple docstring""" def __init__( self : List[Any] , lowerCamelCase__ : Dataset , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Optional[Any] , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) a__ : Any = dataset a__ : str = name a__ : Tuple = con a__ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE a__ : Any = num_proc a__ : Tuple = to_sql_kwargs def _UpperCamelCase( self : List[Any] ): a__ : Any = self.to_sql_kwargs.pop("sql" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("con" , lowerCamelCase__ ) a__ : int = self.to_sql_kwargs.pop("index" , lowerCamelCase__ ) a__ : int = self._write(index=lowerCamelCase__ , **self.to_sql_kwargs ) return written def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] ): a__, a__, a__ : Union[str, Any] = args a__ : Any = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs a__ : Tuple = query_table( table=self.dataset.data , key=slice(lowerCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) a__ : str = batch.to_pandas() a__ : List[Any] = df.to_sql(self.name , self.con , index=lowerCamelCase__ , **lowerCamelCase__ ) return num_rows or len(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : str = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: a__, a__ : List[str] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, 
index, to_sql_kwargs) for offset in range(0 , lowerCamelCase__ , lowerCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
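A small, hedged usage sketch of the reader/writer pair above, as exposed through `Dataset.from_sql` / `Dataset.to_sql` in the datasets library; the table name and SQLite URI are placeholders.

from datasets import Dataset

# Placeholder data and a local SQLite URI; any SQLAlchemy-compatible connection string works.
ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
ds.to_sql("examples", "sqlite:///demo.db")  # writer path shown above
reloaded = Dataset.from_sql("SELECT * FROM examples", "sqlite:///demo.db")  # reader path
print(reloaded[0])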
37
1
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # collect each undirected edge once, then sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
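A short usage sketch of the minimum-spanning-tree helper above; the edge weights are arbitrary, and the MST method name (`kruskal`) follows the reconstruction above.

# Tiny triangle graph: the heavy 1-3 edge should be excluded from the MST.
g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 10)
mst = g.kruskal()
print(mst.connections)  # only the 1-2 and 2-3 edges remain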
37
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    # Gauss's Easter algorithm: compute the Easter date for the given year.
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
37
1
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
37
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def UpperCamelCase_ ( __a ) -> Union[str, Any]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase__ : nn.Module , lowerCamelCase__ : int ): super().__init__() a__ : int = module a__ : Any = nn.Sequential( nn.Linear(module.in_features , lowerCamelCase__ , bias=lowerCamelCase__ ) , nn.Linear(lowerCamelCase__ , module.out_features , bias=lowerCamelCase__ ) , ) a__ : Tuple = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=lowerCamelCase__ ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ): return self.module(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) + self.adapter(lowerCamelCase__ ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" _lowercase = 'bigscience/bloom-1b7' # Constant values _lowercase = 2.1_09_65_95_52_69_25_74 _lowercase = 'Hello my name is' _lowercase = set() EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. 
I' ) EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' ) EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' ) _lowercase = 1_0 def _UpperCamelCase( self : Dict ): # Models and tokenizer a__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Union[str, Any] ): super().setUp() # Models and tokenizer a__ : List[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="auto" ) a__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : List[Any] ): a__ : str = self.model_abit.config self.assertTrue(hasattr(lowerCamelCase__ , "quantization_config" ) ) a__ : Optional[Any] = config.to_dict() a__ : int = config.to_diff_dict() a__ : List[str] = config.to_json_string() def _UpperCamelCase( self : int ): from bitsandbytes.nn import Paramsabit a__ : List[Any] = self.model_fpaa.get_memory_footprint() a__ : str = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) a__ : Optional[Any] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCamelCase( self : Tuple ): from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(lowerCamelCase__ , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCamelCase( self : str ): a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Tuple = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[Any] = BitsAndBytesConfig() a__ : Optional[int] = True a__ : int = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , device_map="auto" ) a__ : str = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : int = model_abit_from_config.generate( input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) def _UpperCamelCase( self : Dict ): with self.assertRaises(lowerCamelCase__ ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = BitsAndBytesConfig() with self.assertRaises(lowerCamelCase__ ): a__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=lowerCamelCase__ , load_in_abit=lowerCamelCase__ , device_map="auto" , bnb_abit_quant_type="nf4" , ) def _UpperCamelCase( self : int ): with self.assertRaises(lowerCamelCase__ ): # Tries with `str` self.model_abit.to("cpu" ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` 
self.model_abit.to(torch.device("cuda:0" ) ) with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.float() with self.assertRaises(lowerCamelCase__ ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything a__ : int = self.tokenizer(self.input_text , return_tensors="pt" ) a__ : Any = self.model_fpaa.to(torch.floataa ) a__ : List[Any] = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.to("cpu" ) # Check this does not throw an error a__ : Tuple = self.model_fpaa.half() # Check this does not throw an error a__ : Dict = self.model_fpaa.float() def _UpperCamelCase( self : Dict ): a__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCamelCase__ , device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): """simple docstring""" @classmethod def _UpperCamelCase( cls : str ): a__ : Dict = "t5-small" a__ : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense a__ : int = AutoTokenizer.from_pretrained(cls.model_name ) a__ : str = "Translate in German: Hello, my dog is cute" def _UpperCamelCase( self : Optional[int] ): gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Optional[int] ): from transformers import TaForConditionalGeneration a__ : List[Any] = TaForConditionalGeneration._keep_in_fpaa_modules a__ : Optional[Any] = None # test with `t5-small` a__ : Any = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Any = model.generate(**lowerCamelCase__ ) a__ : Union[str, Any] = modules def _UpperCamelCase( self : List[Any] ): import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` a__ : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) a__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : int = model.generate(**lowerCamelCase__ ) # test with `flan-t5-small` a__ : int = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) a__ : Any = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 ) a__ : Optional[int] = model.generate(**lowerCamelCase__ ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : List[str] ): super().setUp() # model_name a__ : Union[str, Any] = "bigscience/bloom-560m" a__ : Union[str, Any] = "t5-small" # Different types of model a__ : int = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Sequence classification model a__ : Dict = AutoModelForSequenceClassification.from_pretrained( 
self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # CausalLM model a__ : str = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) # Seq2seq model a__ : Dict = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=lowerCamelCase__ , device_map="auto" ) def _UpperCamelCase( self : List[Any] ): del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Union[str, Any] ): from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): super().setUp() def _UpperCamelCase( self : int ): del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Tuple ): a__ : int = pipeline( "text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass a__ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Tuple ): super().setUp() def _UpperCamelCase( self : List[Any] ): a__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=lowerCamelCase__ , device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model a__ : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ) # Second real batch a__ : List[Any] = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase__ ) , self.EXPECTED_OUTPUTS ) class A__ ( A__ ): """simple docstring""" def _UpperCamelCase( self : Dict ): a__ : Any = "facebook/opt-350m" super().setUp() def _UpperCamelCase( self : int ): if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ): return # Step 1: freeze all parameters a__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase__ ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): a__ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability a__ : Tuple = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(lowerCamelCase__ ) ): a__ : Dict = LoRALayer(module.q_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.k_proj , rank=16 ) a__ : List[Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch a__ : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): a__ : Optional[Any] = model.forward(**lowerCamelCase__ ) out.logits.norm().backward() for module in model.modules(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(lowerCamelCase__ , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( A__ ): """simple docstring""" _lowercase = 'gpt2-xl' _lowercase = 3.31_91_85_48_54_15_21_87
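The tests above exercise 8-bit and 4-bit loading; below is a minimal, hedged sketch of the public API they target. It requires bitsandbytes, accelerate, and a CUDA GPU; the checkpoint name and prompt are taken from the test constants.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "bigscience/bloom-1b7"  # same checkpoint the tests use
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quant_config, device_map="auto")
inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))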
37
1
import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = WavaVecaPhonemeCTCTokenizer _lowercase = False def _UpperCamelCase( self : Optional[int] ): super().setUp() a__ : Optional[Any] = ( "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː " "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː " "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 " "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ " "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ " "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ " "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ " "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ " "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ " "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ " "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ " "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ " "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4" ).split(" " ) a__ : str = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Any = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"} a__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) def _UpperCamelCase( self : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=False , lowerCamelCase__ : Tuple=20 , lowerCamelCase__ : Any=5 ): a__ : int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCamelCase__ )) for i in range(len(lowerCamelCase__ ) )] a__ : Optional[Any] = list(filter(lambda lowerCamelCase__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowerCamelCase__ ) , lowerCamelCase__ ) ) if max_length is not None and len(lowerCamelCase__ ) > max_length: a__ : Dict = toks[:max_length] if min_length is not None and len(lowerCamelCase__ ) < min_length and len(lowerCamelCase__ ) > 0: while len(lowerCamelCase__ ) < min_length: a__ : Any = toks + toks # toks_str = [t[1] for t in toks] a__ : int = [t[0] for t in toks] # Ensure consistency a__ : Any = tokenizer.decode(lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ ) if " " not in output_txt and len(lowerCamelCase__ ) > 1: a__ : List[Any] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCamelCase__ ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCamelCase__ ) ) if with_prefix_space: a__ : int = " " + output_txt a__ : Any = tokenizer.encode(lowerCamelCase__ , 
add_special_tokens=lowerCamelCase__ ) return output_txt, output_ids def _UpperCamelCase( self : Tuple , **lowerCamelCase__ : Union[str, Any] ): kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : int = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) # check adding a single token tokenizer.add_tokens("xxx" ) a__ : Any = tokenizer("m xxx ɪ" , do_phonemize=lowerCamelCase__ ).input_ids self.assertEqual(lowerCamelCase__ , [13, 392, 17] ) # xxx should be last token tokenizer.add_tokens(["aaa", "bbb", "ccc"] ) a__ : int = tokenizer("m aaa ɪ ccc" , do_phonemize=lowerCamelCase__ ).input_ids self.assertEqual(lowerCamelCase__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa a__ : str = tokenizer("maɪ c" , do_phonemize=lowerCamelCase__ ).input_ids self.assertEqual(lowerCamelCase__ , [3, 200] ) # mai should be <unk> (=3) def _UpperCamelCase( self : Union[str, Any] ): a__ : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) a__ : List[Any] = "Hello how are you" a__ : str = tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang="en-us" ) self.assertEqual(lowerCamelCase__ , "h ə l oʊ h aʊ ɑːɹ j uː" ) def _UpperCamelCase( self : Any ): a__ : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) a__ : int = "Hello how are you" a__ : int = tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang="en-us" ) self.assertEqual(tokenizer(lowerCamelCase__ ).input_ids , tokenizer(lowerCamelCase__ , do_phonemize=lowerCamelCase__ ).input_ids ) def _UpperCamelCase( self : List[str] ): a__ : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) a__ : Optional[Any] = "Hello how are you" a__ : Optional[Any] = tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang="en-us" ) a__ : str = tokenizer.decode(tokenizer(lowerCamelCase__ ).input_ids ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Union[str, Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) a__ : Any = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] a__ : List[str] = tokenizer.decode(sample_ids[0] ) a__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , batch_tokens[0] ) self.assertEqual(lowerCamelCase__ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] ) def _UpperCamelCase( self : List[Any] ): a__ : Union[str, Any] = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) a__ : int = "Hello how are you" a__ : Any = tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang="en-us" ) self.assertEqual(lowerCamelCase__ , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" ) def _UpperCamelCase( self : Optional[Any] ): a__ : Union[str, Any] = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) a__ : Any = "Hello how are you" a__ : Dict = tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang="en-us" ) self.assertEqual(tokenizer(lowerCamelCase__ ).input_ids , tokenizer(lowerCamelCase__ , do_phonemize=lowerCamelCase__ ).input_ids ) def _UpperCamelCase( self : Union[str, Any] ): a__ : str = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) 
tokenizer.add_tokens("|" ) # fmt: off a__ : Dict = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter a__ : Any = tokenizer.decode(sample_ids[0] ) a__ : Tuple = tokenizer.batch_decode(lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , batch_tokens[0] ) self.assertEqual(lowerCamelCase__ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] ) # decode with no word_del_token filter a__ : Optional[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=lowerCamelCase__ ) a__ : Optional[int] = tokenizer.batch_decode(lowerCamelCase__ , filter_word_delimiter_token=lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , batch_tokens[0] ) self.assertEqual(lowerCamelCase__ , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] ) def _UpperCamelCase( self : Any ): a__ : Optional[Any] = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) a__ : Optional[int] = "Hello how are you" a__ : int = tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang="en-us" ) a__ : List[str] = tokenizer.decode(tokenizer(lowerCamelCase__ ).input_ids , filter_word_delimiter_token=lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : Tuple = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" ) tokenizer.add_tokens("|" ) a__ : List[Any] = "Hello how are you" a__ : int = tokenizer.phonemize(lowerCamelCase__ , phonemizer_lang="en-us" ) a__ : List[str] = tokenizer.decode(tokenizer(lowerCamelCase__ ).input_ids , filter_word_delimiter_token=lowerCamelCase__ ) self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : int = self.tokenizer_class.from_pretrained( "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=lowerCamelCase__ ) a__ : Optional[Any] = "Hello how are you" a__ : Union[str, Any] = tokenizer(lowerCamelCase__ , phonemizer_lang="en-us" ).input_ids a__ : List[str] = tokenizer(lowerCamelCase__ , phonemizer_lang="fr-fr" ).input_ids self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = tokenizer.decode(lowerCamelCase__ ) a__ : Optional[int] = tokenizer.decode(lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , "h ə l oʊ h aʊ ɑːɹ j uː" ) self.assertEqual(lowerCamelCase__ , "ɛ l o h aʊ a ʁ j u" ) def _UpperCamelCase( self : List[str] ): a__ : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) a__ : Dict = "Hello how Are you" a__ : Optional[Any] = "hello how are you" a__ : Any = tokenizer(lowerCamelCase__ ).input_ids a__ : Dict = tokenizer(lowerCamelCase__ ).input_ids self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Tuple ): a__ : Union[str, Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" ) tokenizer.add_tokens(["!", "?"] ) tokenizer.add_special_tokens({"cls_token": "$$$"} ) # fmt: off a__ : str = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394], ] # fmt: on a__ : Dict = tokenizer.batch_decode(lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , ["k s ɾ ɾ l ɭʲ!?!? 
$$$", "j ð s j ð s oːɹ $$$"] ) @staticmethod def _UpperCamelCase( lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] ): a__ : Optional[int] = [d[key] for d in offsets] return retrieved_list def _UpperCamelCase( self : str ): a__ : List[str] = self.get_tokenizer(word_delimiter_token="|" ) tokenizer.add_tokens("|" ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" a__ : Optional[int] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on a__ : List[Any] = tokenizer.decode(lowerCamelCase__ , output_char_offsets=lowerCamelCase__ , filter_word_delimiter_token=lowerCamelCase__ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue("text" in outputs ) self.assertTrue("char_offsets" in outputs ) self.assertTrue(isinstance(lowerCamelCase__ , lowerCamelCase__ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.get_tokenizer(word_delimiter_token="|" ) def check_list_tuples_equal(lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] ): self.assertTrue(isinstance(lowerCamelCase__ , lowerCamelCase__ ) ) self.assertTrue(isinstance(outputs_list[0] , lowerCamelCase__ ) ) # transform list to ModelOutput a__ : Tuple = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] ) def recursive_check(lowerCamelCase__ : Any , lowerCamelCase__ : int ): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): [recursive_check(lowerCamelCase__ , lowerCamelCase__ ) for la, la in zip(lowerCamelCase__ , lowerCamelCase__ )] self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] ) # fmt: off a__ : List[str] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char a__ : Union[str, Any] = tokenizer.batch_decode(lowerCamelCase__ , output_char_offsets=lowerCamelCase__ ) a__ : Dict = [tokenizer.decode(lowerCamelCase__ , output_char_offsets=lowerCamelCase__ ) for ids in sample_ids] check_list_tuples_equal(lowerCamelCase__ , lowerCamelCase__ ) @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" ) def _UpperCamelCase( self : Optional[int] ): pass @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" ) def _UpperCamelCase( self : Optional[Any] ): pass @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" ) def _UpperCamelCase( self : str ): pass @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" ) def _UpperCamelCase( self : Optional[int] ): pass def _UpperCamelCase( self : Dict ): a__ : List[str] = self.get_tokenizers(do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__ : int = tokenizer.vocab_size a__ : List[Any] = len(lowerCamelCase__ ) self.assertNotEqual(lowerCamelCase__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) a__ : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"] a__ : Any = tokenizer.add_tokens(lowerCamelCase__ ) a__ : List[Any] = tokenizer.vocab_size a__ : Dict = len(lowerCamelCase__ ) self.assertNotEqual(lowerCamelCase__ , 0 ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , len(lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ , all_size + len(lowerCamelCase__ ) ) a__ : List[str] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=lowerCamelCase__ ) self.assertGreaterEqual(len(lowerCamelCase__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) a__ : str = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} a__ : int = tokenizer.add_special_tokens(lowerCamelCase__ ) a__ : int = tokenizer.vocab_size a__ : Dict = len(lowerCamelCase__ ) self.assertNotEqual(lowerCamelCase__ , 0 ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , len(lowerCamelCase__ ) ) self.assertEqual(lowerCamelCase__ , all_size_a + len(lowerCamelCase__ ) ) a__ : List[str] = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=lowerCamelCase__ ) self.assertGreaterEqual(len(lowerCamelCase__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." ) def _UpperCamelCase( self : Dict ): pass @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." 
) def _UpperCamelCase( self : Union[str, Any] ): pass def _UpperCamelCase( self : List[Any] ): # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. a__ : Any = self.get_tokenizers(fast=lowerCamelCase__ , do_lower_case=lowerCamelCase__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): a__ : int = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"] a__ : List[str] = tokenizer.convert_tokens_to_string(lowerCamelCase__ ) self.assertIsInstance(output["text"] , lowerCamelCase__ )
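A hedged usage sketch matching what these tests check; it needs the phonemizer backend (espeak) installed, and the checkpoint name and expected phoneme string are taken from the tests above.

from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# Text is phonemized before being mapped to ids; decode goes back to phonemes, not words.
print(tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us"))  # "h ə l oʊ h aʊ ɑːɹ j uː"
ids = tokenizer("Hello how are you").input_ids
print(tokenizer.decode(ids))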
37
import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int]=100 , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : int=32 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Union[str, Any]=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Union[str, Any]=10 , lowerCamelCase__ : str=0.02 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]=[0, 1, 2, 3] , ): a__ : Dict = parent a__ : Dict = 100 a__ : Optional[int] = batch_size a__ : Union[str, Any] = image_size a__ : Any = patch_size a__ : Optional[Any] = num_channels a__ : int = is_training a__ : List[str] = use_labels a__ : Optional[Any] = hidden_size a__ : List[Any] = num_hidden_layers a__ : str = num_attention_heads a__ : str = intermediate_size a__ : int = hidden_act a__ : List[Any] = hidden_dropout_prob a__ : Dict = attention_probs_dropout_prob a__ : Union[str, Any] = type_sequence_label_size a__ : Optional[Any] = initializer_range a__ : List[str] = scope a__ : int = out_indices a__ : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) a__ : Optional[int] = (image_size // patch_size) ** 2 a__ : Union[str, Any] = num_patches + 1 def _UpperCamelCase( self : int ): a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Optional[Any] = None a__ : Tuple = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) a__ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCamelCase( self : Tuple ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , 
initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any ): a__ : str = BeitModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ): a__ : int = BeitForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[Any] = model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCamelCase( self : str , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ): a__ : List[str] = self.type_sequence_label_size a__ : Optional[Any] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images a__ : Optional[Any] = 1 a__ : List[str] = BeitForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) a__ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCamelCase( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): a__ : int = self.num_labels a__ : List[str] = BeitForSemanticSegmentation(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) a__ : str = model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _UpperCamelCase( self : Optional[int] ): a__ : Any = self.prepare_config_and_inputs() a__, a__, a__, a__ : Union[str, Any] = config_and_inputs a__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _lowercase = ( { 'feature-extraction': BeitModel, 'image-classification': BeitForImageClassification, 'image-segmentation': BeitForSemanticSegmentation, } if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Any ): a__ : int = BeitModelTester(self ) a__ : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def _UpperCamelCase( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work 
well with `nn.DataParallel`" ) def _UpperCamelCase( self : Dict ): pass def _UpperCamelCase( self : Optional[Any] ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : str ): a__, a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : int = model_class(lowerCamelCase__ ) a__ : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _UpperCamelCase( self : int ): a__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): if not self.model_tester.is_training: return a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : str = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling]: continue a__ : List[str] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() a__ : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : Tuple = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return a__ : List[Any] = False a__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(lowerCamelCase__ ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.gradient_checkpointing_enable() model.to(lowerCamelCase__ ) model.train() a__ : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) a__ : int = model(**lowerCamelCase__ ).loss loss.backward() def _UpperCamelCase( self : List[str] ): a__, a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() a__ : Dict = _config_zero_init(lowerCamelCase__ ) for model_class in self.all_model_classes: a__ : str = model_class(config=lowerCamelCase__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 
1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _UpperCamelCase( self : Optional[int] ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Tuple = BeitModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : Optional[int] ): return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def _UpperCamelCase( self : str ): a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ ) a__ : Optional[Any] = self.default_image_processor a__ : Dict = prepare_img() a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ ) # prepare bool_masked_pos a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ ) a__ : Tuple = outputs.logits # verify the logits a__ : List[str] = torch.Size((1, 196, 8_192) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[int] = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) ) @slow def _UpperCamelCase( self : Dict ): a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ ) a__ : int = self.default_image_processor a__ : List[Any] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Union[str, Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Tuple = 281 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : Any ): a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( lowerCamelCase__ ) a__ : str = self.default_image_processor a__ : List[str] = prepare_img() a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Dict = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Optional[int] = torch.Size((1, 21_841) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) ) a__ : Optional[Any] = 2_396 self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : Tuple = model.to(lowerCamelCase__ ) a__ : 
List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : Union[str, Any] = Image.open(ds[0]["file"] ) a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Optional[Any] = model(**lowerCamelCase__ ) a__ : List[str] = outputs.logits # verify the logits a__ : Tuple = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , lowerCamelCase__ ) a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: a__ : Dict = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=lowerCamelCase__ , ) else: a__ : Dict = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=lowerCamelCase__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _UpperCamelCase( self : Tuple ): a__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) a__ : List[Any] = model.to(lowerCamelCase__ ) a__ : int = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ ) a__ : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) a__ : str = Image.open(ds[0]["file"] ) a__ : str = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : List[Any] = model(**lowerCamelCase__ ) a__ : Any = outputs.logits.detach().cpu() a__ : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(500, 300)] ) a__ : Optional[int] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ ) a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ ) a__ : Any = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
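# --- Illustrative usage sketch (not part of the test suite above) ---
# A minimal, hedged example of running the same "microsoft/beit-base-patch16-224"
# checkpoint that the integration tests exercise, outside of unittest. The image
# path is a placeholder; the API calls mirror the ones used in the tests above.
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
model.eval()

image = Image.open("path/to/image.png")  # placeholder image path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# For this checkpoint the logits cover the 1000 ImageNet classes, matching the
# (1, 1_000) shape assertion in the tests above.
predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])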
37
1
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
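# --- Worked example (illustrative, follows the minimax function above) ---
# Hand-checking the recursion on a smaller two-level game tree. With four leaf
# scores the maximiser moves at depth 0, the minimiser at depth 1, and the
# leaves sit at depth 2 (= log2(4)):
#
#   minimax(0, 0, True, [3, 5, 2, 9], 2) = max(min(3, 5), min(2, 9))
#                                        = max(3, 2)
#                                        = 3
example_scores = [3, 5, 2, 9]
assert minimax(0, 0, True, example_scores, math.log(len(example_scores), 2)) == 3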
37
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) def UpperCamelCase_ ( __a ) -> Union[str, Any]: a__ : Tuple = R"\w+[.]\d+" a__ : List[Any] = re.findall(__a , __a ) for pat in pats: a__ : Union[str, Any] = key.replace(__a , "_".join(pat.split("." ) ) ) return key def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : List[str] = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): a__ : Any = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: a__ : Optional[Any] = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: a__ : Union[str, Any] = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer a__ : List[str] = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: a__ : str = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer a__ : Tuple = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": a__ : Tuple = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight a__ : Optional[Any] = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias a__ : Union[str, Any] = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def UpperCamelCase_ ( __a , __a , __a=42 ) -> str: # Step 1: Convert pytorch tensor to numpy a__ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params a__ : Tuple = flax_model.init_weights(PRNGKey(__a ) ) a__ : Optional[Any] = flatten_dict(__a ) a__ : Union[str, Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): a__ : Optional[int] = rename_key(__a ) a__ : Optional[int] = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters a__, a__ : Union[str, Any] = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown a__ : str = jnp.asarray(__a ) return unflatten_dict(__a )
37
1
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = DebertaVaTokenizer _lowercase = DebertaVaTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing a__ : Optional[int] = DebertaVaTokenizer(lowerCamelCase__ , unk_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Dict ): a__ : Optional[int] = "this is a test" a__ : Dict = "this is a test" return input_text, output_text def _UpperCamelCase( self : Optional[Any] ): a__ : int = "<pad>" a__ : List[str] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "[PAD]" ) self.assertEqual(len(lowerCamelCase__ ) , 30_001 ) def _UpperCamelCase( self : Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def _UpperCamelCase( self : Optional[int] ): # fmt: off a__ : List[str] = " \tHeLLo!how \n Are yoU? " a__ : Any = ["▁hello", "!", "how", "▁are", "▁you", "?"] # fmt: on a__ : Tuple = DebertaVaTokenizer(lowerCamelCase__ , do_lower_case=lowerCamelCase__ ) a__ : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Union[str, Any] = DebertaVaTokenizerFast(lowerCamelCase__ , do_lower_case=lowerCamelCase__ ) a__ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def _UpperCamelCase( self : Any ): pass @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." ) def _UpperCamelCase( self : Optional[int] ): pass def _UpperCamelCase( self : Optional[Any] ): # fmt: off a__ : Optional[int] = "I was born in 92000, and this is falsé." 
a__ : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on a__ : Any = DebertaVaTokenizer(lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = DebertaVaTokenizerFast(lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : int = "I was born in 92000, and this is falsé." a__ : Optional[Any] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on a__ : Tuple = DebertaVaTokenizer(lowerCamelCase__ , do_lower_case=lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Union[str, Any] = DebertaVaTokenizerFast(lowerCamelCase__ , do_lower_case=lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] ): # fmt: off a__ : int = "I was born in 92000, and this is falsé." a__ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on a__ : Dict = DebertaVaTokenizer(lowerCamelCase__ , do_lower_case=lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = DebertaVaTokenizerFast(lowerCamelCase__ , do_lower_case=lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Dict = "I was born in 92000, and this is falsé." a__ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ] # fmt: on a__ : Union[str, Any] = DebertaVaTokenizer(lowerCamelCase__ , do_lower_case=lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : str = DebertaVaTokenizerFast(lowerCamelCase__ , do_lower_case=lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : List[Any] = " \tHeLLo!how \n Are yoU? 
" a__ : List[str] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"] # fmt: on a__ : Tuple = DebertaVaTokenizer(lowerCamelCase__ , do_lower_case=lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Tuple = DebertaVaTokenizerFast(lowerCamelCase__ , do_lower_case=lowerCamelCase__ , split_by_punct=lowerCamelCase__ ) a__ : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Tuple ): a__ : Tuple = self.get_tokenizer() a__ : List[Any] = self.get_rust_tokenizer() a__ : Optional[Any] = "I was born in 92000, and this is falsé." a__ : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) a__ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Optional[int] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : str = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Tuple = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = "This is a test" a__ : str = [13, 1, 4_398, 25, 21, 1_289] a__ : Any = ["▁", "T", "his", "▁is", "▁a", "▁test"] a__ : int = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"] a__ : List[str] = DebertaVaTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : Optional[int] = DebertaVaTokenizerFast(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[Any] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # fmt: off a__ : Tuple = "I was born in 92000, and this is falsé." 
a__ : List[Any] = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] a__ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ] a__ : Union[str, Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ] # fmt: on a__ : Union[str, Any] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Tuple = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : str = DebertaVaTokenizer(lowerCamelCase__ ) a__ : int = tokenizer.encode("sequence builders" ) a__ : Optional[int] = tokenizer.encode("multi-sequence build" ) a__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ) a__ : Any = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , lowerCamelCase__ ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , lowerCamelCase__ , ) @slow def _UpperCamelCase( self : Union[str, Any] ): # fmt: off a__ : str = {"input_ids": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
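# --- Illustrative usage sketch (not part of the test suite above) ---
# A hedged example of the two behaviour toggles the tests above exercise:
# `do_lower_case` and `split_by_punct`. SPIECE_MODEL_PATH is a placeholder for a
# SentencePiece model file such as the fixture loaded at the top of this module;
# the sample sentence is the one used throughout the tests.
from transformers import DebertaVaTokenizer

SPIECE_MODEL_PATH = "fixtures/spiece.model"  # placeholder path

sample = "I was born in 92000, and this is falsé."

default_tokenizer = DebertaVaTokenizer(SPIECE_MODEL_PATH)
lowercasing_tokenizer = DebertaVaTokenizer(SPIECE_MODEL_PATH, do_lower_case=True, split_by_punct=True)

# The two flags change how capital letters and punctuation are handled; the
# expected token lists in the tests above spell out the exact differences
# (for example, "I" becomes "▁i" with lower-casing instead of "<unk>").
print(default_tokenizer.tokenize(sample))
print(lowercasing_tokenizer.tokenize(sample))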
37
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
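# --- Illustrative usage sketch (hedged) ---
# The script above is driven entirely by TensorFlowBenchmarkArguments. A minimal
# programmatic invocation might look like the sketch below; the model name and
# the exact argument fields (models, batch_sizes, sequence_lengths) are
# assumptions based on the benchmark arguments class, not taken from this file.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],  # assumed field name
    batch_sizes=[8],               # assumed field name
    sequence_lengths=[128],        # assumed field name
)
benchmark = TensorFlowBenchmark(args=benchmark_args)
results = benchmark.run()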
37
1
import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def UpperCamelCase_ ( __a ) -> List[Tuple[int, ...]]: a__ : Any = [] if isinstance(__a , __a ): for v in tree.values(): shapes.extend(_fetch_dims(__a ) ) elif isinstance(__a , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(__a ) ) elif isinstance(__a , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError("Not supported" ) return shapes @torch.jit.ignore def UpperCamelCase_ ( __a , __a ) -> Tuple[int, ...]: a__ : Tuple = [] for d in reversed(__a ): idx.append(flat_idx % d ) a__ : Tuple = flat_idx // d return tuple(reversed(__a ) ) @torch.jit.ignore def UpperCamelCase_ ( __a , __a , __a , __a = None , __a = None , ) -> List[Tuple[slice, ...]]: # start_edges and end_edges both indicate whether, starting from any given # dimension, the start/end index is at the top/bottom edge of the # corresponding tensor, modeled as a tree def reduce_edge_list(__a ) -> None: a__ : List[str] = True for i in range(len(__a ) ): a__ : Tuple = -1 * (i + 1) l[reversed_idx] &= tally a__ : Optional[int] = l[reversed_idx] if start_edges is None: a__ : Union[str, Any] = [s == 0 for s in start] reduce_edge_list(__a ) if end_edges is None: a__ : List[Any] = [e == (d - 1) for e, d in zip(__a , __a )] reduce_edge_list(__a ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(__a ) == 0: return [()] elif len(__a ) == 1: return [(slice(start[0] , end[0] + 1 ),)] a__ : List[Tuple[slice, ...]] = [] a__ : List[slice] = [] # Dimensions common to start and end can be selected directly for s, e in zip(__a , __a ): if s == e: path_list.append(slice(__a , s + 1 ) ) else: break a__ : Tuple[slice, ...] 
= tuple(__a ) a__ : Optional[int] = len(__a ) # start == end, and we're done if divergence_idx == len(__a ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None a__ : str = start[divergence_idx] return tuple( path + (slice(__a , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None a__ : Dict = end[divergence_idx] return tuple( path + (slice(__a , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. 
If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) a__ : Any = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def UpperCamelCase_ ( __a , __a , __a , __a ) -> torch.Tensor: a__ : str = t.shape[:no_batch_dims] a__ : List[str] = list(_flat_idx_to_idx(__a , __a ) ) # _get_minimal_slice_set is inclusive a__ : Union[str, Any] = list(_flat_idx_to_idx(flat_end - 1 , __a ) ) # Get an ordered list of slices to perform a__ : Tuple = _get_minimal_slice_set( __a , __a , __a , ) a__ : Dict = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def UpperCamelCase_ ( __a , __a , __a , __a , __a = False , __a = None , __a = False , ) -> Any: if not (len(__a ) > 0): raise ValueError("Must provide at least one input" ) a__ : Optional[Any] = [shape[:no_batch_dims] for shape in _fetch_dims(__a )] a__ : Optional[int] = tuple([max(__a ) for s in zip(*__a )] ) def _prep_inputs(__a ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: a__ : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) a__ : List[str] = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: a__ : str = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , __a ) a__ : Tuple = None if _out is not None: a__ : Tuple = tensor_tree_map(lambda __a : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) a__ : Optional[int] = 1 for d in orig_batch_dims: flat_batch_dim *= d a__ : Optional[int] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(__a ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t a__ : List[str] = 0 a__ : Tuple = prepped_outputs for _ in range(__a ): # Chunk the input if not low_mem: a__ : int = _select_chunk else: a__ : List[str] = partial( _chunk_slice , flat_start=__a , flat_end=min(__a , i + chunk_size ) , no_batch_dims=len(__a ) , ) a__ : Dict[str, Any] = tensor_tree_map(__a , __a ) # Run the layer on the chunk a__ : List[Any] = layer(**__a ) # Allocate space for the output if out is None: a__ : List[str] = tensor_tree_map(lambda __a : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , __a ) # Put the chunk in its pre-allocated space if isinstance(__a , __a ): def assign(__a , __a ) -> None: for k, v in da.items(): if isinstance(__a , __a ): assign(__a , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: a__ : Optional[Any] = da[k] assign(__a , __a ) elif isinstance(__a , __a ): for xa, xa in zip(__a , __a ): if _add_into_out: xa[i : i + chunk_size] += xa else: a__ : str = xa elif isinstance(__a , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: a__ : Union[str, Any] = output_chunk else: raise ValueError("Not supported" ) i += chunk_size a__ : Any = tensor_tree_map(lambda __a : t.view(orig_batch_dims + t.shape[1:] ) , __a ) return out class A__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase__ : int = 512 , ): a__ : Tuple = max_chunk_size a__ : Optional[int] = None a__ : Optional[tuple] = None def _UpperCamelCase( self : str , lowerCamelCase__ : Callable , lowerCamelCase__ : tuple , lowerCamelCase__ : int ): logging.info("Tuning chunk size..." 
) if min_chunk_size >= self.max_chunk_size: return min_chunk_size a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] a__ : int = [c for c in candidates if c > min_chunk_size] a__ : Dict = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(lowerCamelCase__ : int ) -> bool: try: with torch.no_grad(): fn(*lowerCamelCase__ , chunk_size=lowerCamelCase__ ) return True except RuntimeError: return False a__ : List[str] = 0 a__ : Any = len(lowerCamelCase__ ) - 1 while i > min_viable_chunk_size_index: a__ : str = test_chunk_size(candidates[i] ) if not viable: a__ : List[str] = (min_viable_chunk_size_index + i) // 2 else: a__ : Tuple = i a__ : str = (i + len(lowerCamelCase__ ) - 1) // 2 return candidates[min_viable_chunk_size_index] def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Iterable , lowerCamelCase__ : Iterable ): a__ : Union[str, Any] = True for aa, aa in zip(lowerCamelCase__ , lowerCamelCase__ ): assert type(lowerCamelCase__ ) == type(lowerCamelCase__ ) if isinstance(lowerCamelCase__ , (list, tuple) ): consistent &= self._compare_arg_caches(lowerCamelCase__ , lowerCamelCase__ ) elif isinstance(lowerCamelCase__ , lowerCamelCase__ ): a__ : int = [v for _, v in sorted(aa.items() , key=lambda lowerCamelCase__ : x[0] )] a__ : List[str] = [v for _, v in sorted(aa.items() , key=lambda lowerCamelCase__ : x[0] )] consistent &= self._compare_arg_caches(lowerCamelCase__ , lowerCamelCase__ ) else: consistent &= aa == aa return consistent def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Callable , lowerCamelCase__ : tuple , lowerCamelCase__ : int , ): a__ : List[str] = True a__ : tuple = tree_map(lambda lowerCamelCase__ : a.shape if isinstance(lowerCamelCase__ , torch.Tensor ) else a , lowerCamelCase__ , lowerCamelCase__ ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(lowerCamelCase__ ) a__ : Optional[Any] = self._compare_arg_caches(self.cached_arg_data , lowerCamelCase__ ) else: # Otherwise, we can reuse the precomputed value a__ : Union[str, Any] = False if not consistent: a__ : str = self._determine_favorable_chunk_size( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) a__ : List[str] = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
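# --- Illustrative usage sketch (hedged) ---
# The last module-level function above chunks a callable's keyword inputs along
# the flattened batch dimensions, applies the callable chunk by chunk, and
# stitches the outputs back together. The sketch below shows the calling
# convention implied by its signature (layer, inputs, chunk_size, no_batch_dims,
# ...); `chunk_layer` stands in for the placeholder function name used in this
# file, and the toy layer and tensor shapes are made up for illustration.
import torch


def toy_layer(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    return x + y


inputs = {"x": torch.randn(4, 128, 16), "y": torch.randn(4, 128, 16)}
# Treat the leading (4, 128) dims as batch dims and process 64 flattened rows
# per chunk (512 rows in total -> 8 chunks).
out = chunk_layer(toy_layer, inputs, 64, 2)
assert out.shape == (4, 128, 16)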
37
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase : Optional[int] = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCamelCase_ ( __a ) -> Any: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCamelCase_ ( __a , __a , __a ) -> Any: return max(metric_fn(__a , __a ) for gt in ground_truths ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [] if args.gold_data_mode == "qa": a__ : Any = pd.read_csv(__a , sep="\t" , header=__a ) for answer_list in data[1]: a__ : Union[str, Any] = ast.literal_eval(__a ) answers.append(__a ) else: a__ : List[str] = [line.strip() for line in open(__a , "r" ).readlines()] a__ : List[str] = [[reference] for reference in references] a__ : List[str] = 0 for prediction, ground_truths in zip(__a , __a ): total += 1 em += metric_max_over_ground_truths(__a , __a , __a ) fa += metric_max_over_ground_truths(__a , __a , __a ) a__ : Dict = 100.0 * em / total a__ : Optional[Any] = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Optional[Any] = args.k a__ : str = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = [line.strip() for line in open(__a , "r" ).readlines()] a__ : Tuple = 0 for hypo, reference in zip(__a , __a ): a__ : Any = set(hypo.split("\t" )[:k] ) a__ : Union[str, Any] = set(reference.split("\t" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k a__ : Union[str, Any] = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: def strip_title(__a ): if title.startswith("\"" ): a__ : Optional[Any] = title[1:] if title.endswith("\"" ): a__ : Union[str, Any] = title[:-1] return title a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a , )["input_ids"].to(args.device ) a__ : Optional[int] = rag_model.rag.question_encoder(__a ) a__ : Union[str, Any] = question_enc_outputs[0] a__ : Optional[int] = rag_model.retriever( __a , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , ) a__ : List[Any] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) a__ : int = [] for docs in all_docs: a__ : Optional[int] = [strip_title(__a ) for title in docs["title"]] provenance_strings.append("\t".join(__a ) ) return provenance_strings def UpperCamelCase_ ( __a , __a , __a ) -> Dict: with torch.no_grad(): a__ : Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __a , return_tensors="pt" , padding=__a , truncation=__a ) a__ : Any = inputs_dict.input_ids.to(args.device ) a__ : Dict = inputs_dict.attention_mask.to(args.device ) a__ : Optional[int] = rag_model.generate( # 
rag_model overwrites generate __a , attention_mask=__a , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__a , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) a__ : int = rag_model.retriever.generator_tokenizer.batch_decode(__a , skip_special_tokens=__a ) if args.print_predictions: for q, a in zip(__a , __a ): logger.info("Q: {} - A: {}".format(__a , __a ) ) return answers def UpperCamelCase_ ( ) -> List[str]: a__ : int = argparse.ArgumentParser() parser.add_argument( "--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=__a , help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ) , ) parser.add_argument( "--index_name" , default=__a , choices=["exact", "compressed", "legacy"] , type=__a , help="RAG model retriever type" , ) parser.add_argument( "--index_path" , default=__a , type=__a , help="Path to the retrieval index" , ) parser.add_argument("--n_docs" , default=5 , type=__a , help="Number of retrieved docs" ) parser.add_argument( "--model_name_or_path" , default=__a , type=__a , required=__a , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , ) parser.add_argument( "--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=__a , help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ) , ) parser.add_argument("--k" , default=1 , type=__a , help="k for the precision@k calculation" ) parser.add_argument( "--evaluation_set" , default=__a , type=__a , required=__a , help="Path to a file containing evaluation samples" , ) parser.add_argument( "--gold_data_path" , default=__a , type=__a , required=__a , help="Path to a tab-separated file with gold samples" , ) parser.add_argument( "--gold_data_mode" , default="qa" , type=__a , choices=["qa", "ans"] , help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ) , ) parser.add_argument( "--predictions_path" , type=__a , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , ) parser.add_argument( "--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , ) parser.add_argument( "--eval_batch_size" , default=8 , type=__a , help="Batch size per GPU/CPU for evaluation." , ) parser.add_argument( "--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , ) parser.add_argument( "--num_beams" , default=4 , type=__a , help="Number of beams to be used when generating answers" , ) parser.add_argument("--min_length" , default=1 , type=__a , help="Min length of the generated answers" ) parser.add_argument("--max_length" , default=50 , type=__a , help="Max length of the generated answers" ) parser.add_argument( "--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , ) parser.add_argument( "--print_docs" , action="store_true" , help="If True, prints docs retried while generating." 
, ) a__ : int = parser.parse_args() a__ : Dict = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) return args def UpperCamelCase_ ( __a ) -> Optional[int]: a__ : Tuple = {} if args.model_type is None: a__ : List[str] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("rag" ): a__ : int = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration a__ : Tuple = args.n_docs if args.index_name is not None: a__ : Any = args.index_name if args.index_path is not None: a__ : int = args.index_path else: a__ : Optional[Any] = BartForConditionalGeneration a__ : Tuple = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s" , __a ) a__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k a__ : Union[str, Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) ) score_fn(__a , args.predictions_path , args.gold_data_path ) continue logger.info("***** Running evaluation for {} *****".format(__a ) ) logger.info(" Batch size = %d" , args.eval_batch_size ) logger.info(" Predictions will be stored under {}".format(args.predictions_path ) ) if args.model_type.startswith("rag" ): a__ : str = RagRetriever.from_pretrained(__a , **__a ) a__ : Optional[int] = model_class.from_pretrained(__a , retriever=__a , **__a ) model.retriever.init_retrieval() else: a__ : Dict = model_class.from_pretrained(__a , **__a ) model.to(args.device ) with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file: a__ : List[Any] = [] for line in tqdm(__a ): questions.append(line.strip() ) if len(__a ) == args.eval_batch_size: a__ : Union[str, Any] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) + "\n" ) preds_file.flush() a__ : Any = [] if len(__a ) > 0: a__ : List[str] = evaluate_batch_fn(__a , __a , __a ) preds_file.write("\n".join(__a ) ) preds_file.flush() score_fn(__a , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase : List[Any] = get_args() main(args)
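# --- Illustrative invocation (hedged) ---
# An example command line for the evaluation script above, built only from the
# argparse options it defines; the script name, checkpoint and file paths are
# placeholders.
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --evaluation_set path/to/questions.txt \
#       --gold_data_path path/to/gold_answers.csv \
#       --predictions_path predictions.txt \
#       --eval_batch_size 8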
37
1
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class A__ ( A__ ): """simple docstring""" def __init__( self : Any , lowerCamelCase__ : str=0.01 , lowerCamelCase__ : str=1_000 ): a__ : Union[str, Any] = p_stop a__ : List[str] = max_length def __iter__( self : str ): a__ : Optional[int] = 0 a__ : Any = False while not stop and count < self.max_length: yield count count += 1 a__ : List[str] = random.random() < self.p_stop class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Optional[int]=True ): a__ : Union[str, Any] = [ BatchSamplerShard(lowerCamelCase__ , 2 , lowerCamelCase__ , split_batches=lowerCamelCase__ , even_batches=lowerCamelCase__ ) for i in range(2 ) ] a__ : Optional[int] = [list(lowerCamelCase__ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(lowerCamelCase__ ) for shard in batch_sampler_shards] , [len(lowerCamelCase__ ) for e in expected] ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : List[str] ): # Check the shards when the dataset is a round multiple of total batch size. a__ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : Union[str, Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) a__ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. a__ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : Union[str, Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : Optional[int] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. a__ : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) a__ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
a__ : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : str = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) # Check the shards when the dataset is very small. a__ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : List[Any] = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : int = [[], []] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): # Check the shards when the dataset is a round multiple of batch size. a__ : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : str = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ ) a__ : str = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size. a__ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : Any = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ ) a__ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : Optional[int] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. a__ : List[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : int = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ ) a__ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : Tuple = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ ) # Check the shards when the dataset is very small. 
a__ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : Optional[int] = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ ) a__ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : str = [[], []] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ ) def _UpperCamelCase( self : Tuple ): # Check the shards when the dataset is a round multiple of total batch size. a__ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : Optional[Any] = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) a__ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. a__ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) a__ : Any = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : str = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. a__ : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : Any = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) a__ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. a__ : int = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : int = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) a__ : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : Tuple = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) # Check the shards when the dataset is very small. 
a__ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : List[str] = [[[0, 1]], []] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) a__ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase__ ) a__ : Tuple = [[], []] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , even_batches=lowerCamelCase__ ) def _UpperCamelCase( self : Tuple ): # Check the shards when the dataset is a round multiple of batch size. a__ : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : Tuple = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ , even_batches=lowerCamelCase__ ) a__ : str = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCamelCase__ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ , even_batches=lowerCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size. a__ : Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : Dict = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ , even_batches=lowerCamelCase__ ) a__ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : List[Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ , even_batches=lowerCamelCase__ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. a__ : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : Dict = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ , even_batches=lowerCamelCase__ ) a__ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : Union[str, Any] = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ , even_batches=lowerCamelCase__ ) # Check the shards when the dataset is very small. 
a__ : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : Union[str, Any] = [[[0, 1]], []] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ , even_batches=lowerCamelCase__ ) a__ : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : List[str] = [[], []] self.check_batch_sampler_shards(lowerCamelCase__ , lowerCamelCase__ , split_batches=lowerCamelCase__ , even_batches=lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] a__ : Optional[Any] = [BatchSamplerShard(lowerCamelCase__ , 2 , lowerCamelCase__ , even_batches=lowerCamelCase__ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def _UpperCamelCase( self : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : str=2 , lowerCamelCase__ : List[Any]=False ): random.seed(lowerCamelCase__ ) a__ : int = list(lowerCamelCase__ ) a__ : Dict = [ IterableDatasetShard( lowerCamelCase__ , batch_size=lowerCamelCase__ , drop_last=lowerCamelCase__ , num_processes=lowerCamelCase__ , process_index=lowerCamelCase__ , split_batches=lowerCamelCase__ , ) for i in range(lowerCamelCase__ ) ] a__ : Union[str, Any] = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(lowerCamelCase__ ) iterable_dataset_lists.append(list(lowerCamelCase__ ) ) a__ : Dict = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size a__ : Dict = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) ) self.assertTrue(len(lowerCamelCase__ ) % shard_batch_size == 0 ) a__ : List[Any] = [] for idx in range(0 , len(lowerCamelCase__ ) , lowerCamelCase__ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(lowerCamelCase__ ) < len(lowerCamelCase__ ): reference += reference self.assertListEqual(lowerCamelCase__ , reference[: len(lowerCamelCase__ )] ) def _UpperCamelCase( self : Tuple ): a__ : Union[str, Any] = 42 a__ : str = RandomIterableDataset() self.check_iterable_dataset_shards(lowerCamelCase__ , lowerCamelCase__ , batch_size=4 , drop_last=lowerCamelCase__ , split_batches=lowerCamelCase__ ) self.check_iterable_dataset_shards(lowerCamelCase__ , lowerCamelCase__ , batch_size=4 , drop_last=lowerCamelCase__ , split_batches=lowerCamelCase__ ) self.check_iterable_dataset_shards(lowerCamelCase__ , lowerCamelCase__ , batch_size=4 , drop_last=lowerCamelCase__ , split_batches=lowerCamelCase__ ) self.check_iterable_dataset_shards(lowerCamelCase__ , lowerCamelCase__ , batch_size=4 , drop_last=lowerCamelCase__ , split_batches=lowerCamelCase__ ) # Edge case with a very small dataset a__ : Union[str, Any] = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(lowerCamelCase__ , lowerCamelCase__ , batch_size=4 , drop_last=lowerCamelCase__ , split_batches=lowerCamelCase__ ) 
self.check_iterable_dataset_shards(lowerCamelCase__ , lowerCamelCase__ , batch_size=4 , drop_last=lowerCamelCase__ , split_batches=lowerCamelCase__ ) self.check_iterable_dataset_shards(lowerCamelCase__ , lowerCamelCase__ , batch_size=4 , drop_last=lowerCamelCase__ , split_batches=lowerCamelCase__ ) self.check_iterable_dataset_shards(lowerCamelCase__ , lowerCamelCase__ , batch_size=4 , drop_last=lowerCamelCase__ , split_batches=lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : int = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCamelCase__ ) a__ : str = SkipBatchSampler(lowerCamelCase__ , 2 ) self.assertListEqual(list(lowerCamelCase__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _UpperCamelCase( self : List[Any] ): a__ : Dict = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _UpperCamelCase( self : List[str] ): a__ : Optional[Any] = DataLoader(list(range(16 ) ) , batch_size=4 ) a__ : Optional[int] = skip_first_batches(lowerCamelCase__ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def _UpperCamelCase( self : Any ): a__ : List[str] = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(lowerCamelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowerCamelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def _UpperCamelCase( self : Optional[Any] ): Accelerator() a__ : List[Any] = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(lowerCamelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowerCamelCase__ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
37
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a = None , ) -> str: a__ : int = {} if train_file is not None: a__ : int = [train_file] if eval_file is not None: a__ : Union[str, Any] = [eval_file] if test_file is not None: a__ : str = [test_file] a__ : Optional[Any] = datasets.load_dataset("csv" , data_files=__a ) a__ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() ) a__ : str = features_name.pop(__a ) a__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) a__ : str = {label: i for i, label in enumerate(__a )} a__ : Tuple = tokenizer.model_input_names a__ : List[str] = {} if len(__a ) == 1: for k in files.keys(): a__ : Optional[Any] = ds[k].map( lambda __a : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , ) elif len(__a ) == 2: for k in files.keys(): a__ : Dict = ds[k].map( lambda __a : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: a__ : str = {k: v for k, v in ex.items() if k in input_names} a__ : str = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: a__ : Tuple = {k: v for k, v in ex.items() if k in input_names} a__ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names} a__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) a__ : Optional[Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: a__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: a__ : Optional[Any] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) a__ : Union[str, Any] = ( tf.data.Dataset.from_generator( __a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: a__ : Tuple = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid UpperCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass class A__ : """simple docstring""" _lowercase = field(metadata={'help': 'Which column contains the label'} ) _lowercase = field(default=A__ , metadata={'help': 'The 
path of the training file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} ) _lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} ) _lowercase = field( default=1_2_8 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _lowercase = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class A__ : """simple docstring""" _lowercase = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _lowercase = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowercase = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def UpperCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) a__, a__, a__ : str = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' f'''16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
a__ : Union[str, Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) a__, a__, a__, a__ : Optional[Any] = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) a__ : Optional[int] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): a__ : Any = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) def compute_metrics(__a ) -> Dict: a__ : Union[str, Any] = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer a__ : Dict = TFTrainer( model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation a__ : Optional[Any] = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) a__ : Dict = trainer.evaluate() a__ : int = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__a , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(f''' {key} = {value}''' ) writer.write(f'''{key} = {value}\n''' ) results.update(__a ) return results if __name__ == "__main__": main()
37
1
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _UpperCamelCase( self : List[Any] ): a__ : Any = 1 a__ : Optional[int] = 3 a__ : Optional[int] = (32, 32) a__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase__ ) return image @property def _UpperCamelCase( self : Union[str, Any] ): torch.manual_seed(0 ) a__ : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def _UpperCamelCase( self : Optional[Any] ): torch.manual_seed(0 ) a__ : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _UpperCamelCase( self : List[str] ): torch.manual_seed(0 ) a__ : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): def extract(*lowerCamelCase__ : List[Any] , **lowerCamelCase__ : int ): class A__ : """simple docstring""" def __init__( self : Tuple ): a__ : Dict = torch.ones([0] ) def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): self.pixel_values.to(lowerCamelCase__ ) return self return Out() return extract def _UpperCamelCase( self : Tuple ): a__ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator a__ : Dict = self.dummy_cond_unet a__ : Optional[Any] = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , ) a__ : Tuple = self.dummy_vae a__ : Optional[int] = self.dummy_text_encoder a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk a__ : Tuple = StableDiffusionPipeline( unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , ) a__ : Any = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Tuple = "A painting of a squirrel eating a burger" a__ : Union[str, Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) a__ : List[Any] = sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) a__ : List[Any] = output.images a__ : Optional[Any] = 
torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) a__ : Dict = sd_pipe( [prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCamelCase__ , )[0] a__ : Tuple = image[0, -3:, -3:, -1] a__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a__ : Optional[Any] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase( self : Union[str, Any] ): a__ : str = "cpu" # ensure determinism for the device-dependent torch.Generator a__ : Optional[Any] = self.dummy_cond_unet a__ : List[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase__ ) a__ : Any = self.dummy_vae a__ : List[Any] = self.dummy_text_encoder a__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # make sure here that pndm scheduler skips prk a__ : Union[str, Any] = StableDiffusionPipeline( unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , ) a__ : int = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Dict = "A painting of a squirrel eating a burger" a__ : int = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) a__ : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" ) a__ : int = output.images a__ : int = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 ) a__ : Tuple = sd_pipe( [prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCamelCase__ , )[0] a__ : str = image[0, -3:, -3:, -1] a__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) a__ : Tuple = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase( self : Tuple ): a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=lowerCamelCase__ ) assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) assert isinstance(pipe.scheduler , lowerCamelCase__ ) assert pipe.safety_checker is None a__ : Optional[int] = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowerCamelCase__ ) a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ ) # sanity check that the pipeline still works assert pipe.safety_checker is None a__ : Optional[Any] = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[Any] = self.dummy_cond_unet a__ : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase__ ) a__ : Tuple = self.dummy_vae a__ : str = self.dummy_text_encoder a__ : Union[str, Any] = 
CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) # put models in fp16 a__ : Union[str, Any] = unet.half() a__ : List[Any] = vae.half() a__ : Optional[int] = bert.half() # make sure here that pndm scheduler skips prk a__ : Tuple = StableDiffusionPipeline( unet=lowerCamelCase__ , scheduler=lowerCamelCase__ , vae=lowerCamelCase__ , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=self.dummy_extractor , ) a__ : List[Any] = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : List[str] = "A painting of a squirrel eating a burger" a__ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Optional[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase( self : Any ): a__ : Tuple = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=lowerCamelCase__ ) a__ : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a__ : Optional[Any] = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Union[str, Any] = ( "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle" " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with" " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and" " children from bahnhof zoo, detailed " ) a__ : Optional[Any] = 4_003_660_346 a__ : List[str] = 7 # without safety guidance (sld_guidance_scale = 0) a__ : str = torch.manual_seed(lowerCamelCase__ ) a__ : Any = sd_pipe( [prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) a__ : str = output.images a__ : List[Any] = image[0, -3:, -3:, -1] a__ : Tuple = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) a__ : List[Any] = torch.manual_seed(lowerCamelCase__ ) a__ : Optional[Any] = sd_pipe( [prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a__ : Dict = output.images a__ : Union[str, Any] = image[0, -3:, -3:, -1] a__ : Dict = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase( self : Tuple ): a__ : Tuple = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=lowerCamelCase__ ) a__ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) a__ : Optional[int] = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Optional[int] = "padme amidala taking a bath artwork, safe for work, no nudity" a__ : Tuple = 2_734_971_755 a__ : Union[str, Any] = 7 a__ : List[Any] = torch.manual_seed(lowerCamelCase__ ) 
a__ : List[str] = sd_pipe( [prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) a__ : Tuple = output.images a__ : List[str] = image[0, -3:, -3:, -1] a__ : Optional[Any] = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 a__ : Optional[Any] = torch.manual_seed(lowerCamelCase__ ) a__ : List[str] = sd_pipe( [prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a__ : List[Any] = output.images a__ : Optional[Any] = image[0, -3:, -3:, -1] a__ : str = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase( self : Optional[Any] ): a__ : Any = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" ) a__ : Dict = sd_pipe.to(lowerCamelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ ) a__ : Any = ( "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c." " leyendecker" ) a__ : Tuple = 1_044_355_234 a__ : List[Any] = 12 a__ : str = torch.manual_seed(lowerCamelCase__ ) a__ : Any = sd_pipe( [prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=0 , ) a__ : int = output.images a__ : Optional[int] = image[0, -3:, -3:, -1] a__ : Tuple = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 a__ : Dict = torch.manual_seed(lowerCamelCase__ ) a__ : Tuple = sd_pipe( [prompt] , generator=lowerCamelCase__ , guidance_scale=lowerCamelCase__ , num_inference_steps=50 , output_type="np" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) a__ : Dict = output.images a__ : Union[str, Any] = image[0, -3:, -3:, -1] a__ : Tuple = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
37
import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase : List[str] = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCamelCase : Union[str, Any] = None def UpperCamelCase_ ( ) -> List[str]: a__ : List[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=__a , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=__a , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def UpperCamelCase_ ( __a ) -> str: a__ : Optional[Any] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : Dict = bool(qa["answers"]["text"] ) return qid_to_has_ans def UpperCamelCase_ ( __a ) -> List[Any]: def remove_articles(__a ): return ARTICLES_REGEX.sub(" " , __a ) def white_space_fix(__a ): return " ".join(text.split() ) def remove_punc(__a ): a__ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__a ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__a ) ) ) ) def UpperCamelCase_ ( __a ) -> Dict: if not s: return [] return normalize_answer(__a ).split() def UpperCamelCase_ ( __a , __a ) -> str: return int(normalize_answer(__a ) == normalize_answer(__a ) ) def UpperCamelCase_ ( __a , __a ) -> Dict: a__ : int = get_tokens(__a ) a__ : Optional[Any] = get_tokens(__a ) a__ : Any = collections.Counter(__a ) & collections.Counter(__a ) a__ : Dict = sum(common.values() ) if len(__a ) == 0 or len(__a ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 a__ : Tuple = 1.0 * num_same / len(__a ) a__ : str = 1.0 * num_same / len(__a ) a__ : str = (2 * precision * recall) / (precision + recall) return fa def UpperCamelCase_ ( __a , __a ) -> int: a__ : List[str] = {} a__ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: a__ : List[Any] = qa["id"] a__ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(__a )] if not gold_answers: # For unanswerable questions, only correct answer is empty string a__ : Tuple = [""] if qid not in preds: print(f'''Missing prediction for {qid}''' ) continue a__ : Tuple = preds[qid] # Take max over all gold answers a__ : Optional[int] = max(compute_exact(__a , __a ) for a in gold_answers ) a__ : str = max(compute_fa(__a , __a ) for a in gold_answers ) return exact_scores, fa_scores def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: a__ : Optional[Any] = {} for qid, s in scores.items(): a__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: a__ : Dict = float(not qid_to_has_ans[qid] ) else: a__ : Optional[Any] = s return new_scores def UpperCamelCase_ ( __a , __a , __a=None ) -> Tuple: if not 
qid_list: a__ : Union[str, Any] = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: a__ : int = len(__a ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: for k in new_eval: a__ : Optional[Any] = new_eval[k] def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[int]: plt.step(__a , __a , color="b" , alpha=0.2 , where="post" ) plt.fill_between(__a , __a , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__a ) plt.savefig(__a ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a , __a=None , __a=None ) -> Dict: a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) a__ : Any = 0.0 a__ : Optional[int] = 1.0 a__ : Optional[int] = 0.0 a__ : Any = [1.0] a__ : Tuple = [0.0] a__ : List[str] = 0.0 for i, qid in enumerate(__a ): if qid_to_has_ans[qid]: true_pos += scores[qid] a__ : Any = true_pos / float(i + 1 ) a__ : int = true_pos / float(__a ) if i == len(__a ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__a ) recalls.append(__a ) if out_image: plot_pr_curve(__a , __a , __a , __a ) return {"ap": 100.0 * avg_prec} def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> str: if out_image_dir and not os.path.exists(__a ): os.makedirs(__a ) a__ : Optional[int] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return a__ : Optional[int] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) a__ : str = {k: float(__a ) for k, v in qid_to_has_ans.items()} a__ : Optional[Any] = make_precision_recall_eval( __a , __a , __a , __a , out_image=os.path.join(__a , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. 
NoAns)" , ) merge_eval(__a , __a , "pr_exact" ) merge_eval(__a , __a , "pr_f1" ) merge_eval(__a , __a , "pr_oracle" ) def UpperCamelCase_ ( __a , __a , __a , __a ) -> str: if not qid_list: return a__ : Optional[Any] = [na_probs[k] for k in qid_list] a__ : str = np.ones_like(__a ) / float(len(__a ) ) plt.hist(__a , weights=__a , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(f'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(__a , f'''na_prob_hist_{name}.png''' ) ) plt.clf() def UpperCamelCase_ ( __a , __a , __a , __a ) -> Optional[Any]: a__ : str = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) a__ : Optional[Any] = num_no_ans a__ : Dict = cur_score a__ : Any = 0.0 a__ : Optional[Any] = sorted(__a , key=lambda __a : na_probs[k] ) for i, qid in enumerate(__a ): if qid not in scores: continue if qid_to_has_ans[qid]: a__ : Optional[int] = scores[qid] else: if preds[qid]: a__ : str = -1 else: a__ : Union[str, Any] = 0 cur_score += diff if cur_score > best_score: a__ : Any = cur_score a__ : Dict = na_probs[qid] return 100.0 * best_score / len(__a ), best_thresh def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a ) -> Any: a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__, a__ : Tuple = find_best_thresh(__a , __a , __a , __a ) a__ : Any = best_exact a__ : Any = exact_thresh a__ : List[Any] = best_fa a__ : Optional[int] = fa_thresh def UpperCamelCase_ ( ) -> Tuple: with open(OPTS.data_file ) as f: a__ : List[Any] = json.load(__a ) a__ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: a__ : int = json.load(__a ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: a__ : List[str] = json.load(__a ) else: a__ : Optional[int] = {k: 0.0 for k in preds} a__ : Optional[Any] = make_qid_to_has_ans(__a ) # maps qid to True/False a__ : List[Any] = [k for k, v in qid_to_has_ans.items() if v] a__ : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if not v] a__, a__ : str = get_raw_scores(__a , __a ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : str = apply_no_ans_threshold(__a , __a , __a , OPTS.na_prob_thresh ) a__ : Tuple = make_eval_dict(__a , __a ) if has_ans_qids: a__ : str = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "HasAns" ) if no_ans_qids: a__ : List[Any] = make_eval_dict(__a , __a , qid_list=__a ) merge_eval(__a , __a , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(__a , __a , __a , __a , __a , __a ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__a , __a , __a , __a , __a , OPTS.out_image_dir ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(__a , __a , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(__a , __a ) else: print(json.dumps(__a , indent=2 ) ) if __name__ == "__main__": UpperCamelCase : Any = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
37
1
import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class A__ ( A__ ): """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str = "▁" , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[str, AddedToken] = "<unk>" , lowerCamelCase__ : Union[str, AddedToken] = "</s>" , lowerCamelCase__ : Union[str, AddedToken] = "<pad>" , ): a__ : Tuple = { "pad": {"id": 0, "token": pad_token}, "eos": {"id": 1, "token": eos_token}, "unk": {"id": 2, "token": unk_token}, } a__ : Optional[Any] = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): a__ : Tuple = token_dict["token"] a__ : Optional[Any] = Tokenizer(Unigram() ) a__ : List[str] = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}" ) , " " ), normalizers.Lowercase(), ] ) a__ : Any = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ ), pre_tokenizers.Digits(individual_digits=lowerCamelCase__ ), pre_tokenizers.Punctuation(), ] ) a__ : List[str] = decoders.Metaspace(replacement=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ ) a__ : Any = TemplateProcessing( single=f'''$A {self.special_tokens['eos']['token']}''' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , ) a__ : List[Any] = { "model": "SentencePieceUnigram", "replacement": replacement, "add_prefix_space": add_prefix_space, } super().__init__(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[str, List[str]] , lowerCamelCase__ : int = 8_000 , lowerCamelCase__ : bool = True , ): a__ : Tuple = trainers.UnigramTrainer( vocab_size=lowerCamelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase__ , ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ): a__ : str = [files] self._tokenizer.train(lowerCamelCase__ , trainer=lowerCamelCase__ ) self.add_unk_id() def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Iterator[str], Iterator[Iterator[str]]] , lowerCamelCase__ : int = 8_000 , lowerCamelCase__ : bool = True , ): a__ : str = trainers.UnigramTrainer( vocab_size=lowerCamelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCamelCase__ , ) self._tokenizer.train_from_iterator(lowerCamelCase__ , trainer=lowerCamelCase__ ) self.add_unk_id() def _UpperCamelCase( self : Tuple ): a__ : str = json.loads(self._tokenizer.to_str() ) a__ : str = self.special_tokens["unk"]["id"] a__ : List[str] = Tokenizer.from_str(json.dumps(lowerCamelCase__ ) )
37
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = CLIPTokenizer _lowercase = CLIPTokenizerFast _lowercase = True _lowercase = {} _lowercase = False def _UpperCamelCase( self : List[Any] ): super().setUp() # fmt: off a__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on a__ : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) ) a__ : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"] a__ : Optional[Any] = {"unk_token": "<unk>"} a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCamelCase__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCamelCase__ ) ) def _UpperCamelCase( self : Dict , **lowerCamelCase__ : int ): kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] , **lowerCamelCase__ : Optional[int] ): kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[Any] ): a__ : int = "lower newer" a__ : Optional[int] = "lower newer" return input_text, output_text def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) a__ : int = "lower newer" a__ : List[str] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"] a__ : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = tokens + [tokenizer.unk_token] a__ : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ ) @require_ftfy def _UpperCamelCase( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : List[str] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : Any = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ ) a__ : int = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
a__ : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : Dict = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways a__ : Optional[Any] = "xa\u0303y" + " " + "x\xe3y" a__ : Optional[int] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of space type a__ : str = [ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: a__ : Any = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) # Test that the tokenization is identical on unicode of line break type a__ : Union[str, Any] = [ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: a__ : List[Any] = tokenizer_s.tokenize(lowerCamelCase__ ) a__ : int = tokenizer_r.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): a__ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name` a__ : Tuple = f'''{text_of_1_token} {text_of_1_token}''' a__ : Optional[int] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Union[str, Any] = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) a__ : Optional[Any] = f''' {text}''' a__ : str = self.rust_tokenizer_class.from_pretrained( lowerCamelCase__ , use_fast=lowerCamelCase__ , ) a__ : Dict = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , ) def _UpperCamelCase( self : int ): # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. 
with self.assertRaises(lowerCamelCase__ ) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" ) self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` provided does not match the expected format." ) ) @require_ftfy def _UpperCamelCase( self : int ): super().test_tokenization_python_rust_equals() def _UpperCamelCase( self : str ): # CLIP always lower cases letters pass
37
1
import math from datetime import datetime, timedelta def UpperCamelCase_ ( __a ) -> datetime: a__ : Union[str, Any] = year % 19 a__ : List[str] = year % 4 a__ : str = year % 7 a__ : Any = math.floor(year / 100 ) a__ : List[str] = math.floor((13 + 8 * leap_day_inhibits) / 25 ) a__ : Optional[int] = leap_day_inhibits / 4 a__ : Union[str, Any] = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 a__ : Dict = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 a__ : Any = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon a__ : List[Any] = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(__a , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(__a , 4 , 18 ) else: return datetime(__a , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1994, 2000, 2010, 2021, 2023): UpperCamelCase : Tuple = """will be""" if year > datetime.now().year else """was""" print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
37
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCamelCase : Dict = """<<<<<<< This should probably be modified because it mentions: """ UpperCamelCase : List[Any] = """======= >>>>>>> """ UpperCamelCase : Optional[Any] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] UpperCamelCase : Any = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def UpperCamelCase_ ( __a ) -> Optional[Any]: return ConvertCommand(args.tfds_path , args.datasets_directory ) class A__ ( A__ ): """simple docstring""" @staticmethod def _UpperCamelCase( lowerCamelCase__ : ArgumentParser ): a__ : List[str] = parser.add_parser( "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , ) train_parser.add_argument( "--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , ) train_parser.add_argument( "--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." ) train_parser.set_defaults(func=lowerCamelCase__ ) def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple ): a__ : str = get_logger("datasets-cli/converting" ) a__ : Optional[Any] = tfds_path a__ : Optional[int] = datasets_directory def _UpperCamelCase( self : int ): if os.path.isdir(self._tfds_path ): a__ : List[str] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): a__ : Any = os.path.dirname(self._tfds_path ) else: raise ValueError("--tfds_path is neither a directory nor a file. Please check path." 
) a__ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) a__ : Tuple = [] a__ : str = [] a__ : List[Any] = {} if os.path.isdir(self._tfds_path ): a__ : List[str] = os.listdir(lowerCamelCase__ ) else: a__ : Union[str, Any] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info("Skipping file" ) continue with open(lowerCamelCase__ , encoding="utf-8" ) as f: a__ : List[Any] = f.readlines() a__ : Union[str, Any] = [] a__ : Union[str, Any] = False a__ : Union[str, Any] = False a__ : Dict = [] for line in lines: a__ : Optional[Any] = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: a__ : List[Any] = "import datasets\n" elif "import tensorflow" in out_line: # order is important here a__ : List[str] = "" continue elif "from absl import logging" in out_line: a__ : Dict = "from datasets import logging\n" elif "getLogger" in out_line: a__ : List[Any] = out_line.replace("getLogger" , "get_logger" ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): a__ : List[str] = True a__ : Dict = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" ) out_lines.append(lowerCamelCase__ ) out_lines.append(lowerCamelCase__ ) continue else: for pattern, replacement in TO_CONVERT: a__ : Tuple = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: a__ : Optional[int] = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) ) a__ : Optional[Any] = "from . import " + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: a__ : Optional[int] = True out_lines.append(lowerCamelCase__ ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset a__ : Dict = f_name.replace(".py" , "" ) a__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) a__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCamelCase__ ) if needs_manual_update: with_manual_update.append(lowerCamelCase__ ) with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f: f.writelines(lowerCamelCase__ ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: a__ : Any = os.path.basename(lowerCamelCase__ ) a__ : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCamelCase__ , lowerCamelCase__ ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
37
1
import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=7 , lowerCamelCase__ : Any=3 , lowerCamelCase__ : Union[str, Any]=18 , lowerCamelCase__ : Optional[int]=30 , lowerCamelCase__ : int=400 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Any=True , lowerCamelCase__ : Any=[0.5, 0.5, 0.5] , lowerCamelCase__ : Optional[int]=[0.5, 0.5, 0.5] , ): a__ : Tuple = size if size is not None else {"height": 18, "width": 18} a__ : Any = parent a__ : Optional[int] = batch_size a__ : Optional[int] = num_channels a__ : Optional[Any] = image_size a__ : Any = min_resolution a__ : Union[str, Any] = max_resolution a__ : Union[str, Any] = do_resize a__ : Union[str, Any] = size a__ : List[str] = do_normalize a__ : str = image_mean a__ : Dict = image_std def _UpperCamelCase( self : Tuple ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = DPTImageProcessor if is_vision_available() else None def _UpperCamelCase( self : Dict ): a__ : Tuple = DPTImageProcessingTester(self ) @property def _UpperCamelCase( self : Dict ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase( self : Union[str, Any] ): a__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , "image_mean" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "image_std" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "size" ) ) def _UpperCamelCase( self : Tuple ): a__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) a__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def _UpperCamelCase( self : str ): # Initialize image_processing a__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input a__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a__ : int = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], 
self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase( self : Optional[int] ): # Initialize image_processing a__ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input a__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a__ : int = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _UpperCamelCase( self : List[Any] ): # Initialize image_processing a__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input a__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a__ : Optional[Any] = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
37
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class A__ ( A__ ): """simple docstring""" _lowercase = '' _lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase = None # compression type in fsspec. ex: "gzip" _lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , lowerCamelCase__ : str = "" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , **lowerCamelCase__ : List[str] ): super().__init__(self , **lowerCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode a__ : str = fsspec.open( lowerCamelCase__ , mode="rb" , protocol=lowerCamelCase__ , compression=self.compression , client_kwargs={ "requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459 "trust_env": True, # Enable reading proxy env variables. **(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) a__ : Optional[int] = os.path.basename(self.file.path.split("::" )[0] ) a__ : int = ( self.compressed_name[: self.compressed_name.rindex("." )] if "." in self.compressed_name else self.compressed_name ) a__ : List[Any] = None @classmethod def _UpperCamelCase( cls : int , lowerCamelCase__ : int ): # compressed file paths are always relative to the archive root return super()._strip_protocol(lowerCamelCase__ ).lstrip("/" ) def _UpperCamelCase( self : Dict ): if self.dir_cache is None: a__ : Dict = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name} a__ : int = {f["name"]: f} def _UpperCamelCase( self : Tuple , lowerCamelCase__ : str ): return self.file.open().read() def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : Optional[Any] , ): a__ : Optional[int] = self._strip_protocol(lowerCamelCase__ ) if mode != "rb": raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' ) return self.file.open() class A__ ( A__ ): """simple docstring""" _lowercase = 'bz2' _lowercase = 'bz2' _lowercase = '.bz2' class A__ ( A__ ): """simple docstring""" _lowercase = 'gzip' _lowercase = 'gzip' _lowercase = '.gz' class A__ ( A__ ): """simple docstring""" _lowercase = 'lz4' _lowercase = 'lz4' _lowercase = '.lz4' class A__ ( A__ ): """simple docstring""" _lowercase = 'xz' _lowercase = 'xz' _lowercase = '.xz' class A__ ( A__ ): """simple docstring""" _lowercase = 'zstd' _lowercase = 'zstd' _lowercase = '.zst' def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : str = "rb" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[dict] = None , lowerCamelCase__ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase__ : Tuple , ): super().__init__( fo=lowerCamelCase__ , mode=lowerCamelCase__ , target_protocol=lowerCamelCase__ , target_options=lowerCamelCase__ , block_size=lowerCamelCase__ , **lowerCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 a__ : Any = self.file.__enter__ class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = file_ def __enter__( self : str ): self._file.__enter__() return self def __exit__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ): self._file.__exit__(*lowerCamelCase__ , **lowerCamelCase__ ) def __iter__( self : List[str] ): return iter(self._file ) def _UpperCamelCase( self : Any ): return next(self._file ) def __getattr__( self : Optional[Any] , lowerCamelCase__ : Tuple ): return getattr(self._file , lowerCamelCase__ ) def fixed_enter(*lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ): return WrappedFile(_enter(*lowerCamelCase__ , **lowerCamelCase__ ) ) a__ : Any = fixed_enter
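The compression filesystems above are intended to be registered with fsspec so that chained URLs like the one mentioned in the class comment (gzip://file.txt::http://foo.bar/file.txt.gz) resolve through them. A minimal sketch, assuming the gzip subclass is importable under the illustrative name GzipFileSystem (the concrete class and module names are elided in the listing, so treat them as assumptions):

import fsspec

# hypothetical registration; a host library would normally do this on import
fsspec.register_implementation("gzip", GzipFileSystem)

# read a remote gzip-compressed text file via the chained URL form from the class comment
with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
    data = f.read()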
37
1
from math import pi


def arc_length(angle: float, radius: float) -> float:
    # length of a circular arc spanning `angle` degrees on a circle of the given radius
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
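A quick sanity check, added here for illustration and not part of the original snippet: a 90 degree angle should give a quarter of the circumference.

from math import isclose, pi

quarter = arc_length(90, 10)
assert isclose(quarter, 2 * pi * 10 / 4)  # a quarter turn of a radius-10 circle, about 15.708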
37
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def UpperCamelCase_ ( __a , __a , __a ) -> Optional[Any]: a__ : Union[str, Any] = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") a__ : Union[str, Any] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(__a ): os.makedirs(__a ) a__ : Any = model.state_dict() def to_tf_var_name(__a ): for patt, repl in iter(__a ): a__ : Tuple = name.replace(__a , __a ) return f'''bert/{name}''' def create_tf_var(__a , __a , __a ): a__ : Tuple = tf.dtypes.as_dtype(tensor.dtype ) a__ : Dict = tf.get_variable(dtype=__a , shape=tensor.shape , name=__a , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(__a ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: a__ : int = to_tf_var_name(__a ) a__ : Union[str, Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): a__ : int = torch_tensor.T a__ : Optional[Any] = create_tf_var(tensor=__a , name=__a , session=__a ) tf.keras.backend.set_value(__a , __a ) a__ : int = session.run(__a ) print(f'''Successfully created {tf_name}: {np.allclose(__a , __a )}''' ) a__ : Any = tf.train.Saver(tf.trainable_variables() ) saver.save(__a , os.path.join(__a , model_name.replace("-" , "_" ) + ".ckpt" ) ) def UpperCamelCase_ ( __a=None ) -> int: a__ : Dict = argparse.ArgumentParser() parser.add_argument("--model_name" , type=__a , required=__a , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=__a , default=__a , required=__a , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=__a , required=__a , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=__a , required=__a , help="Directory in which to save tensorflow model" ) a__ : Optional[Any] = parser.parse_args(__a ) a__ : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=__a , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
37
1
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    # recursively move `height` disks from `from_pole` to `to_pole`, using `with_pole` as the spare peg
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    print("moving disk from", from_pole, "to", to_pole)


def main() -> None:
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
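A small sketch to go with the recursion above (added for illustration): each extra disk doubles the work and adds one move, so a tower of height n takes 2**n - 1 moves.

def count_moves(height: int) -> int:
    # mirrors the structure of move_tower, but only counts the move_disk calls
    return 0 if height < 1 else 2 * count_moves(height - 1) + 1


assert count_moves(3) == 2**3 - 1  # 7 moves for three disks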
37
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class A__ : """simple docstring""" def __init__( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str=13 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Any=24 , lowerCamelCase__ : Optional[Any]=16 , lowerCamelCase__ : int=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Optional[Any]=37 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : Optional[Any]=0.02 , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[Any]=2 , ): a__ : str = parent a__ : Any = batch_size a__ : Dict = patch_size a__ : List[Any] = max_length a__ : str = num_mel_bins a__ : Optional[Any] = is_training a__ : Optional[int] = use_labels a__ : List[Any] = hidden_size a__ : str = num_hidden_layers a__ : Any = num_attention_heads a__ : Union[str, Any] = intermediate_size a__ : List[str] = hidden_act a__ : str = hidden_dropout_prob a__ : Tuple = attention_probs_dropout_prob a__ : List[Any] = type_sequence_label_size a__ : Any = initializer_range a__ : str = scope a__ : List[str] = frequency_stride a__ : Union[str, Any] = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a__ : List[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a__ : List[str] = (self.max_length - self.patch_size) // self.time_stride + 1 a__ : Tuple = frequency_out_dimension * time_out_dimension a__ : List[str] = num_patches + 2 def _UpperCamelCase( self : List[str] ): a__ : Any = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a__ : List[Any] = None if self.use_labels: a__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a__ : List[str] = self.get_config() return config, input_values, labels def _UpperCamelCase( self : Optional[int] ): return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , 
lowerCamelCase__ : Optional[int] ): a__ : List[Any] = ASTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase( self : str ): a__ : Dict = self.prepare_config_and_inputs() ( ( a__ ), ( a__ ), ( a__ ), ) : Optional[int] = config_and_inputs a__ : List[Any] = {"input_values": input_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) _lowercase = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _UpperCamelCase( self : str ): a__ : str = ASTModelTester(self ) a__ : Any = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def _UpperCamelCase( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _UpperCamelCase( self : List[str] ): pass def _UpperCamelCase( self : Optional[int] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Any = model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def _UpperCamelCase( self : Tuple ): a__, a__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowerCamelCase__ ) a__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : Optional[int] = [*signature.parameters.keys()] a__ : Optional[Any] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) @slow def _UpperCamelCase( self : int ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : Union[str, Any] = ASTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def UpperCamelCase_ ( ) -> Any: a__ : Optional[int] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" ) a__, a__ : List[str] = torchaudio.load(__a ) return audio, sampling_rate @require_torch @require_torchaudio class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase( self : List[str] ): return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _UpperCamelCase( self : Optional[int] ): a__ : int = self.default_feature_extractor a__ : Optional[Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" 
).to(lowerCamelCase__ ) a__ : Any = self.default_feature_extractor a__, a__ : Dict = prepare_audio() a__ : str = audio.squeeze().numpy() a__ : Any = feature_extractor(lowerCamelCase__ , sampling_rate=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): a__ : Any = model(**lowerCamelCase__ ) # verify the logits a__ : Union[str, Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) a__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
37
1
from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCamelCase : Optional[Any] = logging.get_logger(__name__) def UpperCamelCase_ ( __a , __a , __a ) -> int: return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def UpperCamelCase_ ( __a , __a , __a = None ) -> List[Any]: a__ : Union[str, Any] = tesseract_config if tesseract_config is not None else "" # apply OCR a__ : Any = to_pil_image(__a ) a__, a__ : Dict = pil_image.size a__ : Optional[Any] = pytesseract.image_to_data(__a , lang=__a , output_type="dict" , config=__a ) a__, a__, a__, a__, a__ : str = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates a__ : Optional[int] = [idx for idx, word in enumerate(__a ) if not word.strip()] a__ : Any = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices] a__ : Any = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] a__ : int = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] a__ : Tuple = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] a__ : int = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format a__ : Optional[int] = [] for x, y, w, h in zip(__a , __a , __a , __a ): a__ : Optional[int] = [x, y, x + w, y + h] actual_boxes.append(__a ) # finally, normalize the bounding boxes a__ : List[str] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__a , __a , __a ) ) assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes" return words, normalized_boxes class A__ ( A__ ): """simple docstring""" _lowercase = ['pixel_values'] def __init__( self : Any , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = "" , **lowerCamelCase__ : Optional[int] , ): super().__init__(**lowerCamelCase__ ) a__ : Tuple = size if size is not None else {"height": 224, "width": 224} a__ : Optional[Any] = get_size_dict(lowerCamelCase__ ) a__ : Any = do_resize a__ : Any = size a__ : Dict = resample a__ : str = apply_ocr a__ : Union[str, Any] = ocr_lang a__ : Any = tesseract_config def _UpperCamelCase( self : Dict , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Optional[int] , ): a__ : Dict = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) a__ : Optional[int] = (size["height"], size["width"]) return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : int , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase__ : Any , ): a__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize a__ : Dict = size if size is not None else self.size a__ : str = get_size_dict(lowerCamelCase__ ) a__ : int = resample if resample is not None else self.resample a__ : Dict = apply_ocr if apply_ocr is not None else self.apply_ocr a__ : Union[str, Any] = ocr_lang if ocr_lang is not None else self.ocr_lang a__ : Optional[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config a__ : str = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) # All transformations expect numpy arrays. a__ : Dict = [to_numpy_array(lowerCamelCase__ ) for image in images] if apply_ocr: requires_backends(self , "pytesseract" ) a__ : Dict = [] a__ : Optional[int] = [] for image in images: a__, a__ : Union[str, Any] = apply_tesseract(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) words_batch.append(lowerCamelCase__ ) boxes_batch.append(lowerCamelCase__ ) if do_resize: a__ : Tuple = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) a__ : Union[str, Any] = [flip_channel_order(lowerCamelCase__ ) for image in images] a__ : Optional[int] = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images] a__ : Tuple = BatchFeature(data={"pixel_values": images} , tensor_type=lowerCamelCase__ ) if apply_ocr: a__ : List[Any] = words_batch a__ : Union[str, Any] = boxes_batch return data
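A minimal usage sketch for the OCR-enabled image processor defined above, assuming it is exposed under an importable name (the class name is elided in the listing, so DocumentImageProcessor below is purely illustrative) and that pytesseract is installed:

from PIL import Image

image = Image.open("document.png").convert("RGB")
processor = DocumentImageProcessor(apply_ocr=True)  # hypothetical name for the class above
encoding = processor(images=image, return_tensors="pt")
# encoding carries "pixel_values", plus recognized "words" and normalized "boxes" when apply_ocr=True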
37
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = XGLMTokenizer _lowercase = XGLMTokenizerFast _lowercase = True _lowercase = True def _UpperCamelCase( self : List[Any] ): super().setUp() # We have a SentencePiece fixture for testing a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase( self : List[Any] ): a__ : int = "<pad>" a__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ ) def _UpperCamelCase( self : Optional[Any] ): a__ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(lowerCamelCase__ ) , 1_008 ) def _UpperCamelCase( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def _UpperCamelCase( self : Optional[int] ): a__ : str = XGLMTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ ) a__ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a__ : List[str] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) a__ : Dict = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _UpperCamelCase( self : Dict ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCamelCase__ , f.name ) a__ : Any = XGLMTokenizer(f.name , keep_accents=lowerCamelCase__ ) a__ : List[str] = pickle.dumps(lowerCamelCase__ ) pickle.loads(lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : Tuple = "I was born in 92000, and this is falsé." a__ : List[str] = tokenizer.tokenize(lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.tokenize(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : Optional[int] = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) a__ : Union[str, Any] = rust_tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = self.get_rust_tokenizer() a__ : Tuple = tokenizer.encode(lowerCamelCase__ ) a__ : Optional[Any] = rust_tokenizer.encode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ ) @slow def _UpperCamelCase( self : List[str] ): a__ : Union[str, Any] = "Hello World!" a__ : List[str] = [2, 31_227, 4_447, 35] self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off a__ : Union[str, Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) ) @slow def _UpperCamelCase( self : List[Any] ): # fmt: off a__ : Optional[int] = { "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ , model_name="facebook/xglm-564M" , padding=lowerCamelCase__ , )
37
1
class Things:
    """simple docstring"""

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # greedily take items in decreasing order of key_func while the weight budget allows
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest
    doctest.testmod()
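A short usage sketch for the helpers above; the menu items and budget are illustrative values, not taken from the original file.

food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 60, 40, 70]

menu = build_menu(food, value, weight)
# greedily fill a 60-unit weight budget, preferring the best value-per-weight items
chosen, total = greedy(menu, 60, Things.value_weight)
print(chosen, total)  # picks the burger first and stops once nothing else fits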
37
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def UpperCamelCase_ ( ) -> int: a__ : int = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" a__ : Optional[Any] = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" ) return image def UpperCamelCase_ ( __a ) -> Optional[Any]: a__ : Any = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def UpperCamelCase_ ( __a , __a , __a ) -> List[str]: a__ : Union[str, Any] = dct.pop(__a ) 
a__ : List[str] = val def UpperCamelCase_ ( __a , __a ) -> Optional[Any]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases a__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' ) a__ : Tuple = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict a__ : str = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) a__ : int = qkv_bias def UpperCamelCase_ ( __a ) -> Dict: a__ : Tuple = 364 if "coco" in model_name else 224 a__ : int = InstructBlipVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: a__ : Tuple = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: a__ : Dict = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: a__ : List[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: a__ : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32_001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 a__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() a__ : Any = InstructBlipConfig(vision_config=__a , text_config=__a , qformer_config=__a ) return config, image_size @torch.no_grad() def UpperCamelCase_ ( __a , __a=None , __a=False ) -> int: a__ : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: a__ : List[Any] = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) a__ : Union[str, Any] = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) a__, a__ : List[str] = get_blipa_config(__a ) a__ : Any = InstructBlipForConditionalGeneration(__a ).eval() a__ : Dict = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } a__, a__ : Dict = model_name_to_original[model_name] # load original model print("Loading original model..." ) a__ : Optional[Any] = "cuda:1" if torch.cuda.is_available() else "cpu" a__ : List[Any] = "cuda:2" if torch.cuda.is_available() else "cpu" a__, a__, a__ : Tuple = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print("Done!" 
) # update state dict keys a__ : Dict = original_model.state_dict() a__ : Optional[int] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): a__ : Optional[int] = state_dict.pop(__a ) if key.startswith("Qformer.bert" ): a__ : List[Any] = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: a__ : Any = key.replace("self" , "attention" ) if "llm_proj" in key: a__ : Dict = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: a__ : int = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): a__ : List[str] = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): a__ : str = key.replace("t5" , "language" ) a__ : Dict = val # read in qv biases read_in_q_v_bias(__a , __a ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__a , strict=__a ) a__ : Union[str, Any] = load_demo_image() a__ : int = "What is unusual about this image?" # create processor a__ : Any = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__a , image_std=__a ) a__ : Tuple = InstructBlipProcessor( image_processor=__a , tokenizer=__a , qformer_tokenizer=__a , ) a__ : Tuple = processor(images=__a , text=__a , return_tensors="pt" ).to(__a ) # make sure processor creates exact same pixel values a__ : Optional[int] = vis_processors["eval"](__a ).unsqueeze(0 ).to(__a ) a__ : Optional[Any] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "vicuna" in model_name: a__ : str = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits a__ : List[str] = hf_model(**__a ).logits else: a__ : List[Any] = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits a__ : str = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__a ) a__ : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) a__ : Any = hf_model(**__a , labels=__a ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape a__ : Tuple = 1e-4 if "vicuna" in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , __a , atol=__a ) print("Looks ok!" ) print("Generating with original model..." ) a__ : Tuple = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) a__ : int = hf_model.generate( **__a , do_sample=__a , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
a__ : int = 2 print("Original generation:" , __a ) a__ : str = processor.batch_decode(__a , skip_special_tokens=__a ) a__ : str = [text.strip() for text in output_text] print("HF generation:" , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f'''Salesforce/{model_name}''' ) hf_model.push_to_hub(f'''Salesforce/{model_name}''' ) if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() UpperCamelCase : Optional[int] = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase : Dict = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
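For reference, a hedged example of invoking the conversion script above from the command line; the file name is an assumption, while the flags come from the argparse definition in the script:

python convert_instructblip_original_to_pytorch.py \
    --model_name instructblip-flan-t5-xl \
    --pytorch_dump_folder_path ./instructblip-flan-t5-xl \
    --push_to_hub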
37
1
class A__ : """simple docstring""" def __init__( self : Tuple , lowerCamelCase__ : str = "" , lowerCamelCase__ : bool = False ): # Mapping from the first character of the prefix of the node a__ : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word a__ : Optional[Any] = is_leaf a__ : Optional[Any] = prefix def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : str ): a__ : List[str] = 0 for q, w in zip(self.prefix , lowerCamelCase__ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def _UpperCamelCase( self : str , lowerCamelCase__ : list[str] ): for word in words: self.insert(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : str ): # Case 1: If the word is the prefix of the node # Solution: We set the current node as leaf if self.prefix == word: a__ : Optional[int] = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: a__ : Optional[int] = RadixNode(prefix=lowerCamelCase__ , is_leaf=lowerCamelCase__ ) else: a__ : List[str] = self.nodes[word[0]] a__, a__, a__ : Dict = incoming_node.match( lowerCamelCase__ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(lowerCamelCase__ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: a__ : Optional[Any] = remaining_prefix a__ : str = self.nodes[matching_string[0]] a__ : Union[str, Any] = RadixNode(lowerCamelCase__ , lowerCamelCase__ ) a__ : int = aux_node if remaining_word == "": a__ : Union[str, Any] = True else: self.nodes[matching_string[0]].insert(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str ): a__ : List[Any] = self.nodes.get(word[0] , lowerCamelCase__ ) if not incoming_node: return False else: a__, a__, a__ : List[Any] = incoming_node.match( lowerCamelCase__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(lowerCamelCase__ ) def _UpperCamelCase( self : int , lowerCamelCase__ : str ): a__ : Any = self.nodes.get(word[0] , lowerCamelCase__ ) if not incoming_node: return False else: a__, a__, a__ : Tuple = incoming_node.match( lowerCamelCase__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(lowerCamelCase__ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: a__ : Any = list(self.nodes.values() )[0] a__ : List[str] = merging_node.is_leaf self.prefix += merging_node.prefix a__ : List[str] = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: a__ : Tuple = False # If there is 1 edge, we merge it with its child else: a__ : Union[str, Any] = 
list(incoming_node.nodes.values() )[0] a__ : Optional[Any] = merging_node.is_leaf incoming_node.prefix += merging_node.prefix a__ : Dict = merging_node.nodes return True def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : int = 0 ): if self.prefix != "": print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def UpperCamelCase_ ( ) -> bool: a__ : Tuple = "banana bananas bandana band apple all beast".split() a__ : Union[str, Any] = RadixNode() root.insert_many(__a ) assert all(root.find(__a ) for word in words ) assert not root.find("bandanas" ) assert not root.find("apps" ) root.delete("all" ) assert not root.find("all" ) root.delete("banana" ) assert not root.find("banana" ) assert root.find("bananas" ) return True def UpperCamelCase_ ( ) -> None: assert test_trie() def UpperCamelCase_ ( ) -> None: a__ : Tuple = RadixNode() a__ : int = "banana bananas bandanas bandana band apple all beast".split() root.insert_many(__a ) print("Words:" , __a ) print("Tree:" ) root.print_tree() if __name__ == "__main__": main()
37
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
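A quick cross-check of the row-by-row DP above against the standard library (added for illustration, Python 3.8+ for math.comb):

from math import comb

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252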
37
1
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any]=7 , lowerCamelCase__ : Any=3 , lowerCamelCase__ : Dict=18 , lowerCamelCase__ : str=30 , lowerCamelCase__ : Tuple=400 , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , lowerCamelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , ): a__ : Any = parent a__ : Any = batch_size a__ : Any = num_channels a__ : Union[str, Any] = image_size a__ : Optional[Any] = min_resolution a__ : str = max_resolution a__ : List[str] = do_resize a__ : Any = size if size is not None else {"height": 18, "width": 20} a__ : Union[str, Any] = do_thumbnail a__ : Any = do_align_axis a__ : List[Any] = do_pad a__ : List[str] = do_normalize a__ : Any = image_mean a__ : List[Any] = image_std def _UpperCamelCase( self : Union[str, Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class A__ ( A__ , unittest.TestCase ): """simple docstring""" _lowercase = DonutImageProcessor if is_vision_available() else None def _UpperCamelCase( self : Tuple ): a__ : int = DonutImageProcessingTester(self ) @property def _UpperCamelCase( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def _UpperCamelCase( self : List[Any] ): a__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase__ , "do_resize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "size" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_thumbnail" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_align_long_axis" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_pad" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "image_mean" ) ) self.assertTrue(hasattr(lowerCamelCase__ , "image_std" ) ) def _UpperCamelCase( self : Dict ): a__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 20} ) a__ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) # Previous config had dimensions in (width, height) order a__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {"height": 84, "width": 42} ) def _UpperCamelCase( self : Any ): pass @is_flaky() def _UpperCamelCase( self : List[Any] ): # Initialize image_processing a__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images a__ : Any = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , Image.Image ) # Test not batched input a__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a__ : Tuple = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) @is_flaky() def _UpperCamelCase( self : Optional[int] ): # Initialize image_processing a__ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors a__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , np.ndarray ) # Test not batched input a__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a__ : Optional[Any] = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) @is_flaky() def _UpperCamelCase( self : str ): # Initialize image_processing a__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors a__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase__ , torch.Tensor ) # Test not batched input a__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched a__ : str = image_processing(lowerCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
37
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Optional[Any] = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } UpperCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = LEDTokenizer _lowercase = ['input_ids', 'attention_mask'] def __init__( self : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Any=None , lowerCamelCase__ : int="replace" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Any="<mask>" , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : int=True , **lowerCamelCase__ : Union[str, Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : List[str] = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : Optional[Any] = add_prefix_space a__ : List[str] = pre_tok_class(**lowerCamelCase__ ) a__ : Optional[int] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a__ : Any = "post_processor" a__ : str = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Any = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Optional[Any] = tuple(state["sep"] ) if "cls" in state: a__ : Optional[Any] = tuple(state["cls"] ) a__ : Optional[int] = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Dict = add_prefix_space a__ : int = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : List[Any] = trim_offsets a__ : List[str] = True if changes_to_apply: a__ : int = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : int = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , 
lowerCamelCase__ , lowerCamelCase__ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ): a__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : Union[str, Any] = value def _UpperCamelCase( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ): a__ : List[str] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : List[str] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None ): a__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : List[str] = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase( self : Dict , lowerCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , ): a__ : str = super()._pad( encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) # Load from model defaults if return_attention_mask is None: a__ : Optional[int] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: a__ : Tuple = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
a__ : Dict = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase__ ) if needs_to_be_padded: a__ : Union[str, Any] = len(lowerCamelCase__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` a__ : List[Any] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": a__ : Any = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
37
1
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed UpperCamelCase : int = logging.getLogger(__name__) def UpperCamelCase_ ( __a=2 , __a=3 , __a=16 , __a = 10 , __a = 2 ) -> Optional[int]: def get_dataset(__a ): a__ : Tuple = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(__a , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) a__ : str = get_dataset(__a ) a__ : Union[str, Any] = get_dataset(__a ) a__ : str = DataLoader(__a , shuffle=__a , batch_size=__a , num_workers=4 ) a__ : str = DataLoader(__a , shuffle=__a , batch_size=__a , num_workers=4 ) return (train_dataloader, valid_dataloader) def UpperCamelCase_ ( __a , __a , __a , __a , __a , __a=None ) -> Optional[int]: a__ : Optional[Any] = [] for epoch in range(__a ): # Train quickly model.train() for batch in dataloader: a__, a__ : str = batch a__ : Optional[int] = model(__a ) a__ : Tuple = torch.nn.functional.mse_loss(__a , __a ) accelerator.backward(__a ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class A__ ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] ): super().__init__() a__ : List[Any] = nn.Parameter(torch.randn(1 ) ) a__ : Optional[Any] = nn.Parameter(torch.randn(1 ) ) def _UpperCamelCase( self : int , lowerCamelCase__ : List[Any] ): return x * self.a + self.b class A__ ( unittest.TestCase ): """simple docstring""" def _UpperCamelCase( self : Dict ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) a__ : Optional[int] = DummyModel() a__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) a__, a__ : int = dummy_dataloaders() a__ : List[Any] = ProjectConfiguration(total_limit=1 , project_dir=lowerCamelCase__ , automatic_checkpoint_naming=lowerCamelCase__ ) # Train baseline a__ : int = Accelerator(project_config=lowerCamelCase__ ) a__, a__, a__, a__ : str = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def _UpperCamelCase( self : List[str] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) a__ : Union[str, Any] = DummyModel() a__ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) a__, a__ : Any = dummy_dataloaders() # Train baseline a__ : Optional[Any] = Accelerator() a__, a__, a__, a__ : str = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save initial a__ : Optional[int] = os.path.join(lowerCamelCase__ , "initial" ) accelerator.save_state(lowerCamelCase__ ) ((a__), (a__)) : Any = model.a.item(), model.b.item() a__ : Optional[int] = optimizer.state_dict() a__ : Any = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ((a__), (a__)) : Dict = model.a.item(), model.b.item() a__ : List[Any] = optimizer.state_dict() # Train partially set_seed(42 ) a__ : List[Any] = DummyModel() a__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) a__, a__ : str = dummy_dataloaders() a__ : Any = Accelerator() a__, a__, a__, 
a__ : List[str] = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) accelerator.load_state(lowerCamelCase__ ) ((a__), (a__)) : Any = model.a.item(), model.b.item() a__ : Dict = optimizer.state_dict() self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save everything a__ : Tuple = os.path.join(lowerCamelCase__ , "checkpoint" ) accelerator.save_state(lowerCamelCase__ ) # Load everything back in and make sure all states work accelerator.load_state(lowerCamelCase__ ) test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ((a__), (a__)) : Any = model.a.item(), model.b.item() a__ : Optional[Any] = optimizer.state_dict() self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) a__ : int = DummyModel() a__ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) a__, a__ : List[str] = dummy_dataloaders() a__ : int = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ ) # Train baseline a__ : int = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ ) a__, a__, a__, a__ : List[str] = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save initial accelerator.save_state() ((a__), (a__)) : Optional[Any] = model.a.item(), model.b.item() a__ : List[Any] = optimizer.state_dict() a__ : Union[str, Any] = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ((a__), (a__)) : str = model.a.item(), model.b.item() a__ : List[Any] = optimizer.state_dict() # Train partially set_seed(42 ) a__ : str = DummyModel() a__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) a__, a__ : Optional[int] = dummy_dataloaders() a__ : str = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCamelCase__ ) a__ : str = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ ) a__, a__, a__, a__ : str = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) ) ((a__), (a__)) : int = model.a.item(), model.b.item() a__ : List[str] = optimizer.state_dict() self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) a__ : List[str] = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_1" ) ) test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ((a__), (a__)) : Any = model.a.item(), model.b.item() a__ : Dict = optimizer.state_dict() self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) 
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCamelCase( self : List[Any] ): a__ : int = torch.tensor([1, 2, 3] ) a__ : List[Any] = torch.tensor([2, 3, 4] ) a__ : Tuple = DummyModel() a__ : List[str] = torch.optim.Adam(net.parameters() ) a__ : Optional[Any] = Accelerator() with self.assertRaises(lowerCamelCase__ ) as ve: accelerator.register_for_checkpointing(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = str(ve.exception ) self.assertTrue("Item at index 0" in message ) self.assertTrue("Item at index 1" in message ) self.assertFalse("Item at index 2" in message ) self.assertFalse("Item at index 3" in message ) def _UpperCamelCase( self : str ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) a__ : List[Any] = DummyModel() a__ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) a__ : List[Any] = torch.optim.lr_scheduler.StepLR(lowerCamelCase__ , step_size=1 , gamma=0.99 ) a__, a__ : Optional[int] = dummy_dataloaders() a__ : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ ) # Train baseline a__ : int = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ ) a__, a__, a__, a__, a__ : List[Any] = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save initial accelerator.save_state() a__ : Optional[int] = scheduler.state_dict() train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) self.assertNotEqual(lowerCamelCase__ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) ) self.assertEqual(lowerCamelCase__ , scheduler.state_dict() ) def _UpperCamelCase( self : Optional[Any] ): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) a__ : int = DummyModel() a__ : int = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ , total_limit=2 ) # Train baseline a__ : Tuple = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ ) a__ : Optional[Any] = accelerator.prepare(lowerCamelCase__ ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_9" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , "checkpoints" , "checkpoint_10" ) ) ) @require_cuda def _UpperCamelCase( self : Tuple ): a__ : Union[str, Any] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() ) if __name__ == "__main__": UpperCamelCase : Optional[Any] = """/tmp/accelerate/state_checkpointing""" UpperCamelCase : Optional[int] = DummyModel() UpperCamelCase : List[Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3) UpperCamelCase : Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) UpperCamelCase , UpperCamelCase : Tuple = dummy_dataloaders() UpperCamelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline UpperCamelCase : Union[str, Any] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) 
os.makedirs(savedir) UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) UpperCamelCase , UpperCamelCase : List[str] = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: UpperCamelCase : Tuple = group["""params"""][0].device break assert param_device.type == accelerator.device.type UpperCamelCase : Dict = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""") for group in optimizer.param_groups: UpperCamelCase : str = group["""params"""][0].device break assert ( param_device.type == torch.device("""cpu""").type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""") for group in optimizer.param_groups: UpperCamelCase : Tuple = group["""params"""][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""): accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
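# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): a self-contained
# version of the save/resume pattern the tests above exercise.
# `Accelerator.save_state` / `load_state`, `ProjectConfiguration` and
# `accelerator.prepare` are real Accelerate APIs; the toy model, data and the
# temporary project directory below are assumptions made only for this example.
import os
import tempfile

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed

set_seed(0)
demo_dir = tempfile.mkdtemp()
demo_model = torch.nn.Linear(1, 1)
demo_optimizer = torch.optim.Adam(demo_model.parameters(), lr=1e-3)
demo_scheduler = torch.optim.lr_scheduler.StepLR(demo_optimizer, step_size=1, gamma=0.99)
demo_loader = DataLoader(TensorDataset(torch.randn(32, 1), torch.randn(32, 1)), batch_size=8)

demo_config = ProjectConfiguration(project_dir=demo_dir, automatic_checkpoint_naming=True)
demo_accelerator = Accelerator(project_config=demo_config)
demo_model, demo_optimizer, demo_loader, demo_scheduler = demo_accelerator.prepare(
    demo_model, demo_optimizer, demo_loader, demo_scheduler
)

demo_accelerator.save_state()  # written to {demo_dir}/checkpoints/checkpoint_0

for demo_x, demo_y in demo_loader:  # one quick pass so the states diverge
    demo_loss = torch.nn.functional.mse_loss(demo_model(demo_x), demo_y)
    demo_accelerator.backward(demo_loss)
    demo_optimizer.step()
    demo_optimizer.zero_grad()
demo_scheduler.step()

# Restoring the checkpoint brings model, optimizer and scheduler back to the
# saved state, which is what the equality assertions in the tests above verify.
demo_accelerator.load_state(os.path.join(demo_dir, "checkpoints", "checkpoint_0"))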
37
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase : Union[str, Any] = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } UpperCamelCase : List[str] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class A__ ( A__ ): """simple docstring""" _lowercase = VOCAB_FILES_NAMES _lowercase = PRETRAINED_VOCAB_FILES_MAP _lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase = ['input_ids', 'attention_mask'] _lowercase = RobertaTokenizer def __init__( self : List[str] , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[str]="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Any="<s>" , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : Tuple="<mask>" , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=True , **lowerCamelCase__ : Optional[Any] , ): super().__init__( lowerCamelCase__ , lowerCamelCase__ , 
tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) a__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : Any = getattr(lowerCamelCase__ , pre_tok_state.pop("type" ) ) a__ : int = add_prefix_space a__ : Tuple = pre_tok_class(**lowerCamelCase__ ) a__ : str = add_prefix_space a__ : Tuple = "post_processor" a__ : Dict = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: a__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a__ : Tuple = tuple(state["sep"] ) if "cls" in state: a__ : str = tuple(state["cls"] ) a__ : str = False if state.get("add_prefix_space" , lowerCamelCase__ ) != add_prefix_space: a__ : str = add_prefix_space a__ : Any = True if state.get("trim_offsets" , lowerCamelCase__ ) != trim_offsets: a__ : int = trim_offsets a__ : Dict = True if changes_to_apply: a__ : Union[str, Any] = getattr(lowerCamelCase__ , state.pop("type" ) ) a__ : str = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def _UpperCamelCase( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : Tuple ): a__ : List[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value a__ : List[str] = value def _UpperCamelCase( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : int ): a__ : Optional[int] = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ): a__ : Dict = kwargs.get("is_split_into_words" , lowerCamelCase__ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCamelCase( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ): a__ : int = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=None ): a__ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCamelCase( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ): a__ : Tuple = [self.sep_token_id] a__ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
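# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): typical usage of the
# fast RoBERTa tokenizer defined above, assuming it is the class exposed as
# `RobertaTokenizerFast` in transformers (the vocab/merges/tokenizer maps and
# the slow-tokenizer reference suggest so). The example strings are made up;
# the calls shown are standard transformers usage.
from transformers import RobertaTokenizerFast

roberta_tok = RobertaTokenizerFast.from_pretrained("roberta-base")

# Single sequence: wrapped as <s> ... </s> by the special-token logic above.
single = roberta_tok("Hello world")

# Sequence pair: <s> A </s></s> B </s>, with token_type_ids that are all zeros,
# matching the token-type method defined above.
pair = roberta_tok("Hello world", "How are you?")

# Pre-tokenized input requires add_prefix_space=True, which is exactly what the
# assertions in the *_encode_plus overrides above enforce.
roberta_tok_ws = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
split = roberta_tok_ws(["Hello", "world"], is_split_into_words=True)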
37
1