| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 – 54.5k chars) | int64 (0 – 371) | string (87 – 49.2k chars) | int64 (0 – 349) | int64 (0 – 1) |
import os


def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top left to the bottom right of
    the grid in `filename`, moving only right and down (Project Euler 81)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]

    # The first row and first column can each be reached only one way.
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        """Called to initialize data. Use the call to construct features."""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        """Load datasets. Called after prepare data."""
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and predictions."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """Evaluation called for both val and test."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
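# Hypothetical invocation sketch (only --task_type, --max_seq_length, --labels,
# --gpus and --overwrite_cache are defined above; the remaining flags come from
# lightning_base's add_generic_args and are assumptions):
#
#   python run_ner.py --data_dir ./conll2003 --model_name_or_path bert-base-cased \
#       --output_dir ./ner-output --task_type NER --max_seq_length 128 --do_predict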
import json
import os
import unittest

from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level tokenizers,
        # so this common test is skipped for GPT2.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass

    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)


@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
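# Minimal usage sketch (assumes a transformers release that ships Mask2Former):
# because of the lazy module above, the config class is only materialized on
# first attribute access.
from transformers import Mask2FormerConfig

config = Mask2FormerConfig()
assert config.model_type == "mask2former"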
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
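# To preview this scene (assuming the standard manim CLI and a hypothetical
# filename for the module above), one would typically run the low-quality
# preview renderer:
#
#   manim -pql stage_1.py Stage1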
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
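# Illustrative sanity check (not in the original script): with an all-zero
# weight vector the model is maximally uncertain, so the predicted probability
# is exactly 0.5 for any hypothetical feature vector.
import numpy as np

example_theta = np.zeros(2)  # hypothetical weights
example_point = np.array([5.1, 3.5])  # hypothetical 2-feature sample
assert sigmoid_function(np.dot(example_point, example_theta)) == 0.5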
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p (p, q distinct primes) not exceeding
    base**degree, doing every comparison in log space to avoid huge numbers."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # Copy the template and update the label schema from the dataset's own ClassLabel feature
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
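# Minimal usage sketch (assumes a `datasets` release that still ships task
# templates; the feature names below are illustrative):
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
task = task.align_with_features(features)
assert task.column_mapping == {"text": "text", "labels": "labels"}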
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
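# Minimal usage sketch (assumes `transformers` is installed): instantiate the
# default config and check the attribute aliases defined above.
from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig()
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8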
def get_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val if option is True, else max_val, after validating types."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for to_guess between lower and higher, printing progress."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Collect user input and run the guessing game."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A simple vector class backed by a list of floats."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):  # vector-scalar
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):  # dot product
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A simple matrix class backed by a list of lists of floats."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
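# Quick illustrative checks (not part of the original module) for the vector
# and matrix classes defined above.
v = Vector([1.0, 2.0, 2.0])
assert v.euclidean_length() == 3.0  # sqrt(1 + 4 + 4)
assert str(v * 2.0) == "(2.0,4.0,4.0)"
assert v * v == 9.0  # dot product

identity = Matrix([[1.0, 0.0], [0.0, 1.0]], 2, 2)
assert identity.determinant() == 1.0
assert str(identity * Vector([3.0, 4.0])) == "(3.0,4.0)"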
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"""
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
UpperCamelCase = logging.getLogger(__name__)
def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict):
if os.path.exists(_lowercase):
if os.path.exists(os.path.join(_lowercase , "config.json")) and os.path.isfile(
os.path.join(_lowercase , "config.json")):
os.remove(os.path.join(_lowercase , "config.json"))
if os.path.exists(os.path.join(_lowercase , "pytorch_model.bin")) and os.path.isfile(
os.path.join(_lowercase , "pytorch_model.bin")):
os.remove(os.path.join(_lowercase , "pytorch_model.bin"))
else:
os.makedirs(_lowercase)
model.save_pretrained(_lowercase)
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any]=False):
lowercase__ : Union[str, Any] = 2
if unlogit:
lowercase__ : Dict = torch.pow(_lowercase , _lowercase)
lowercase__ : int = p * torch.log(_lowercase)
lowercase__ : Tuple = 0
return -plogp.sum(dim=-1)
def lowercase_ ( _lowerCamelCase : int):
logger.info("lv, h >\t" + "\t".join(f'''{x + 1}''' for x in range(len(_lowercase))))
for row in range(len(_lowercase)):
if tensor.dtype != torch.long:
logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:.5f}''' for x in tensor[row].cpu().data))
else:
logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:d}''' for x in tensor[row].cpu().data))
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : int=None , _lowerCamelCase : List[str]=False):
lowercase__ , lowercase__ : List[Any] = model.config.num_hidden_layers, model.config.num_attention_heads
lowercase__ : Any = torch.zeros(_lowercase , _lowercase).to(args.device)
lowercase__ : Union[str, Any] = torch.zeros(_lowercase , _lowercase).to(args.device)
if head_mask is None:
lowercase__ : Dict = torch.ones(_lowercase , _lowercase).to(args.device)
head_mask.requires_grad_(requires_grad=_lowercase)
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
lowercase__ : Optional[Any] = None
lowercase__ : Optional[int] = 0.0
lowercase__ : Optional[Any] = 0.0
for step, inputs in enumerate(tqdm(_lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0])):
lowercase__ : Optional[int] = tuple(t.to(args.device) for t in inputs)
((lowercase__) , ) : Tuple = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowercase__ : Tuple = model(_lowercase , labels=_lowercase , head_mask=_lowercase)
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowercase__ , lowercase__ , lowercase__ : List[Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_lowercase):
lowercase__ : str = entropy(attn.detach() , _lowercase)
attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_lowercase).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowercase__ : Optional[Any] = 2
lowercase__ : str = torch.pow(torch.pow(_lowercase , _lowercase).sum(-1) , 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1E-20
if not args.dont_normalize_global_importance:
lowercase__ : str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies")
print_ad_tensor(_lowercase)
if compute_importance:
logger.info("Head importance scores")
print_ad_tensor(_lowercase)
logger.info("Head ranked by importance scores")
lowercase__ : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device)
lowercase__ : Dict = torch.arange(
head_importance.numel() , device=args.device)
lowercase__ : Tuple = head_ranks.view_as(_lowercase)
print_ad_tensor(_lowercase)
return attn_entropy, head_importance, total_loss
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : str):
lowercase__ , lowercase__ , lowercase__ : Dict = compute_heads_importance(_lowercase , _lowercase , _lowercase , compute_entropy=_lowercase)
lowercase__ : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , _lowercase , original_score * args.masking_threshold)
lowercase__ : List[Any] = torch.ones_like(_lowercase)
lowercase__ : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount))
lowercase__ : List[Any] = original_score
while current_score >= original_score * args.masking_threshold:
lowercase__ : Optional[Any] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
lowercase__ : Optional[Any] = float("Inf")
lowercase__ : List[str] = head_importance.view(-1).sort()[1]
if len(_lowercase) <= num_to_mask:
print("BREAK BY num_to_mask")
break
# mask heads
lowercase__ : Any = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist()))
lowercase__ : str = new_head_mask.view(-1)
lowercase__ : Tuple = 0.0
lowercase__ : Tuple = new_head_mask.view_as(_lowercase)
lowercase__ : Dict = new_head_mask.clone().detach()
print_ad_tensor(_lowercase)
# Compute metric and head importance again
lowercase__ , lowercase__ , lowercase__ : Optional[int] = compute_heads_importance(
_lowercase , _lowercase , _lowercase , compute_entropy=_lowercase , head_mask=_lowercase)
lowercase__ : str = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , _lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask")
print_ad_tensor(_lowercase)
np.save(os.path.join(args.output_dir , "head_mask.npy") , head_mask.detach().cpu().numpy())
return head_mask
def prune_heads( args , model , eval_dataloader , head_mask):
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info("Pruning: score with masking: %f score with pruning: %f" , score_masking , score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100)
    save_model(model , args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=None , type=str , required=True , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the model predictions and checkpoints will be written." , )
    # Other parameters
    parser.add_argument(
        "--config_name" , default="" , type=str , help="Pretrained config name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--tokenizer_name" , default="" , type=str , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--cache_dir" , default=None , type=str , help="Where do you want to store the pre-trained models downloaded from s3" , )
    parser.add_argument(
        "--data_subset" , type=int , default=-1 , help="If > 0: limit the data to a subset of data_subset instances.")
    parser.add_argument(
        "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory")
    parser.add_argument(
        "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets")
    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers")
    parser.add_argument(
        "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
    parser.add_argument(
        "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy.")
    parser.add_argument(
        "--masking_threshold" , default=0.9 , type=float , help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value)." , )
    parser.add_argument(
        "--masking_amount" , default=0.1 , type=float , help="Amount of heads to mask at each masking step.")
    parser.add_argument("--metric_name" , default="acc" , type=str , help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length" , default=128 , type=int , help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ) , )
    parser.add_argument("--batch_size" , default=1 , type=int , help="Batch size.")
    parser.add_argument("--seed" , type=int , default=42)
    parser.add_argument("--local_rank" , type=int , default=-1 , help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip" , type=str , default="" , help="Can be used for distant debugging.")
    parser.add_argument("--server_port" , type=str , default="" , help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda" , args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True)
    torch.save(args , os.path.join(args.output_dir , "run_args.bin"))
    logger.info("Training/evaluation parameters %s" , args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader)
        prune_heads(args , model , eval_dataloader , head_mask)
if __name__ == "__main__":
main()
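# A small self-contained sketch of the mask-to-pruning-dict step used in
# prune_heads above; the helper name is hypothetical and it assumes a
# (num_layers, num_heads) torch tensor in which 0.0 marks a pruned head.
import torch
def mask_to_heads_to_prune(head_mask: torch.Tensor) -> dict:
    heads_to_prune = {}
    for layer in range(head_mask.size(0)):
        # indices of the heads whose mask entry is zero in this layer
        pruned = (1 - head_mask[layer].long()).nonzero().squeeze(-1).tolist()
        if pruned:
            heads_to_prune[layer] = pruned
    return heads_to_prune
print(mask_to_heads_to_prune(torch.tensor([[1.0, 0.0], [0.0, 1.0]])))  # {0: [1], 1: [0]}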
| 359 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model( model , input , target , accelerator , do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output , target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup( accelerator , sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset , batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3)
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3)
        sched = LambdaLR(opt , lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync( accelerator):
# Test when on a single CPU or GPU that the context manager does nothing
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def test_distributed_sync( accelerator):
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def test_gradient_accumulation( split_batches=False , dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False , dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def test_dataloader_break():
lowercase__ : List[str] = Accelerator()
lowercase__ : List[Any] = RegressionDataset(length=80)
lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ : int = RegressionDataset(length=96)
lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if iteration < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if batch_num < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(split_batch , dispatch_batches)
    # Currently will break on torch 2.0+, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches)
def _mp_fn( index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
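# For reference, a minimal hypothetical training loop built on the same
# accumulate() context manager the tests above exercise; the model, optimizer
# and loss function are placeholders supplied by the caller.
from accelerate import Accelerator
def train_one_epoch(model, optimizer, dataloader, loss_fn):
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for inputs, targets in dataloader:
        # gradients are synchronized and the step applied only every 2 batches
        with accelerator.accumulate(model):
            loss = loss_fn(model(inputs), targets)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()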
| 333 | 0 |
from functools import lru_cache
def unique_prime_factors( n : int):
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len( num : int):
    return len(unique_prime_factors(num))
def equality( iterable : list):
    return len(set(iterable)) in (0, 1)
def run( n : int):
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n : int = 4):
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
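# Worked examples, with values taken from the Project Euler 47 statement:
# 14 = 2 * 7 and 15 = 3 * 5 are the first two consecutive integers with two
# distinct prime factors each; 644, 645, 646 are the first such run of three.
print(solution(2))  # 14
print(solution(3))  # 644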
| 360 | import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path : str , config_name : str , flax_dump_folder_path : str):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
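# Hypothetical invocation of the converter above (the script filename and all
# paths are illustrative, not taken from this file):
# python convert_t5x_checkpoint_to_flax.py \
#     --t5x_checkpoint_path /tmp/t5x_checkpoint \
#     --config_name google/t5-v1_1-small \
#     --flax_dump_folder_path /tmp/flax_model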
| 333 | 0 |
UpperCamelCase = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
UpperCamelCase = concatenate_datasets
UpperCamelCase = DownloadConfig
UpperCamelCase = DownloadManager
UpperCamelCase = DownloadMode
UpperCamelCase = DownloadConfig
UpperCamelCase = DownloadMode
UpperCamelCase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
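# A short usage sketch of the public API re-exported by this __init__; the
# dataset name is illustrative and assumes network access to the Hub.
from datasets import load_dataset
ds = load_dataset("imdb", split="train")
print(ds[0]["text"][:80], ds[0]["label"])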
| 361 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( PretrainedConfig ):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}
    def __init__( self , vocab_size=50277 , context_length=1024 , hidden_size=4096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
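# Minimal instantiation sketch using the upstream class name (RwkvConfig)
# behind the placeholder identifier above; values are illustrative.
from transformers import RwkvConfig
config = RwkvConfig(context_length=2048, hidden_size=512, num_hidden_layers=12)
print(config.attention_hidden_size)  # 512: falls back to hidden_size when unset
print(config.intermediate_size)      # 2048: defaults to 4 * hidden_size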
| 333 | 0 |
def molarity_to_normality( nfactor : int , moles : float , volume : float):
    """Normality from moles, volume and n-factor: normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)
def moles_to_pressure( volume : float , moles : float , temperature : float):
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume( pressure : float , moles : float , temperature : float):
    """Ideal gas law solved for volume: V = nRT / P, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature( pressure : float , moles : float , volume : float):
    """Ideal gas law solved for temperature: T = PV / (nR), with R = 0.0821 L*atm/(mol*K)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
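# Quick numeric check of moles_to_pressure above: 2 mol at 300 K in a 10 L
# vessel gives (2 * 0.0821 * 300) / 10 = 4.926 atm, which rounds to 5.
print(moles_to_pressure(10, 2, 300))  # 5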
| 362 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
edges.sort(key=lambda lowercase_ : e[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ : Tuple = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
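# The identifiers in the classes above are mangled placeholders, so a usage
# sketch is given in comment form only, with assumed upstream method names:
# g = Graph.build(vertices=[0, 1, 2], edges=[(0, 1, 1), (1, 2, 2), (0, 2, 3)])
# mst = boruvka_mst(g)  # assumed name of the static MST builder defined last
# print(mst)            # expected MST edges: 0-1 (weight 1) and 1-2 (weight 2)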
| 333 | 0 |
from __future__ import annotations
def lowercase_ ( matrix : list[list[int]]):
    # preprocessing the first row
    for i in range(1 , len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix)):
        for j in range(1 , len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
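# Worked example for the DP above (note it mutates the input grid in place):
# in [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest down/right path is
# 1 -> 3 -> 1 -> 1 -> 1 with total cost 7.
print(lowercase_([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7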
| 363 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset")) , "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_( state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None)
def rename_key( name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model" , "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model" , "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed" , "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj" , "projection")
    if "blocks" in name:
        name = name.replace("blocks" , "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense")
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv" , "head")
    if "scratch" in name:
        name = name.replace("scratch" , "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn" , "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn" , "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn" , "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn" , "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
    if "out_conv" in name:
        name = name.replace("out_conv" , "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1" , "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2" , "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1" , "convolution1")
    if "conv2" in name:
        name = name.replace("conv2" , "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained" , "dpt")
    if "bn" in name:
        name = name.replace("bn" , "batch_norm")
    if "head" in name:
        name = name.replace("head" , "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm" , "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer" , "auxiliary_head.head")
    return name
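# One input/output pair produced by rename_key above, traced by hand through
# the replacement rules as a sanity check:
# "pretrained.model.blocks.0.attn.proj.weight"
#   -> "dpt.encoder.layer.0.attention.output.dense.weight"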
def read_in_q_k_v( state_dict , config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image , return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1E-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 333 | 0 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( ciphertext : list[int] , key : tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key) , ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars( ciphertext : list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3):
        encoded = try_key(ciphertext , key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word( possibles : list[str] , common_word : str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def solution( filename : str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"{solution() = }")
| 364 | def lowercase_ ( numerator : int = 1 , digit : int = 1000):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1):
        has_been_divided : list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
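# Small worked example for the cycle-length search above: among denominators
# 1..10, 1/7 has the longest recurring cycle (0.(142857), length 6).
print(lowercase_(1, 10))  # 7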
| 333 | 0 |
import numpy as np
def lowercase_ ( vector : np.array):
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
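# Sanity check for the tanh implementation above: it is odd, tanh(0) == 0,
# and it saturates toward +/-1 for large |x| (np is imported at the top).
print(lowercase_(np.array([-10.0, 0.0, 10.0])))  # approximately [-1.  0.  1.]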
| 365 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( PipelineLatentTesterMixin ,PipelineKarrasSchedulerTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
        lowercase__ : str = UNet2DConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : Any = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
lowercase__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
        lowercase__ : Dict = pipe("anime turtle" , generator=lowercase_ , output_type="np" )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( _lowerCAmelCase ,unittest.TestCase ):
__A : Optional[int] = DanceDiffusionPipeline
__A : Dict = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__A : List[str] = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
__A : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__A : Any = False
__A : Optional[Any] = False
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase__ : Dict = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=SCREAMING_SNAKE_CASE_ , use_timestep_embedding=SCREAMING_SNAKE_CASE_ , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
lowercase__ : Any = IPNDMScheduler()
lowercase__ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def __UpperCamelCase ( self : List[Any] , lowercase_ : Dict , lowercase_ : Any=0 ) -> List[str]:
if str(SCREAMING_SNAKE_CASE_ ).startswith("mps" ):
lowercase__ : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
lowercase__ : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[Any] = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def __UpperCamelCase ( self : str ) -> Dict:
lowercase__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ : Optional[Any] = self.get_dummy_components()
lowercase__ : List[str] = DanceDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
lowercase__ : List[Any] = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase__ : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
lowercase__ : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE_ )
lowercase__ : int = output.audios
lowercase__ : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowercase__ : List[Any] = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __UpperCamelCase ( self : Tuple ) -> Any:
return super().test_save_load_local()
@skip_mps
def __UpperCamelCase ( self : List[str] ) -> Any:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
return super().test_save_load_optional_components()
@skip_mps
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
return super().test_attention_slicing_forward_pass()
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
lowercase__ : Any = torch_device
lowercase__ : Tuple = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
lowercase__ : int = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase__ : Dict = torch.manual_seed(0 )
lowercase__ : Union[str, Any] = pipe(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , audio_length_in_s=4.0_96 )
lowercase__ : Optional[Any] = output.audios
lowercase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowercase__ : Optional[Any] = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
lowercase__ : Dict = torch_device
        lowercase__ : Optional[Any] = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.float16 )
lowercase__ : Dict = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowercase__ : Union[str, Any] = torch.manual_seed(0 )
lowercase__ : int = pipe(generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , audio_length_in_s=4.0_96 )
lowercase__ : int = output.audios
lowercase__ : Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowercase__ : Dict = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 366 | import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
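# Illustration (a hypothetical test case, not part of the upstream module): the
# decorators above stack like ordinary `unittest` skip decorators.
#
#   class ExampleSuite(unittest.TestCase):
#       @slow
#       @require_multi_gpu
#       def test_gather_across_gpus(self):
#           ...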
class snake_case_ ( unittest.TestCase ):
__A : int = True
@classmethod
def __UpperCamelCase ( cls : str ) -> str:
lowercase__ : str = tempfile.mkdtemp()
@classmethod
def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCamelCase ( self : str ) -> Optional[int]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase_ )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str:
lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Tuple = AcceleratorState()
lowercase__ : Optional[int] = tensor[None].clone().to(state.device)
lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu()
lowercase__ : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i] , _lowerCamelCase):
return False
return True
class snake_case_ :
def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]:
lowercase__ : int = returncode
lowercase__ : Dict = stdout
lowercase__ : List[Any] = stderr
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str):
while True:
lowercase__ : int = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False):
if echo:
print("\nRunning: " , " ".join(_lowerCamelCase))
lowercase__ : str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : Tuple = []
lowercase__ : List[Any] = []
def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""):
lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
sink.append(_lowerCamelCase)
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))),
asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True):
lowercase__ : Optional[Any] = asyncio.get_event_loop()
lowercase__ : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))
lowercase__ : str = " ".join(_lowerCamelCase)
if result.returncode > 0:
lowercase__ : Dict = "\n".join(result.stderr)
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''')
return result
class SubprocessCallException(Exception):
    pass
def run_command(command: List[str], return_stdout=False):
    # Runs `command` with subprocess.check_output, optionally returning stdout,
    # and surfaces any failure as a SubprocessCallException.
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
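# Example (illustrative): capture the stdout of a quick Python one-liner.
#
#   run_command([sys.executable, "-c", "print('ok')"], return_stdout=True)  # -> "ok\n"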
| 333 | 0 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 367 | from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
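# The classes above are mechanical placeholders: each one raises through
# requires_backends the moment it is touched without flax installed. A minimal
# sketch of the same pattern (hypothetical, simpler than diffusers' actual
# DummyObject metaclass):
class _RequiresFlax(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the `flax` backend; install it with `pip install flax`.")

class FlaxPlaceholder(metaclass=_RequiresFlax):
    def __init__(self, *args, **kwargs):
        raise ImportError("FlaxPlaceholder requires the `flax` backend.")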
| 333 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str , lowercase_ : List[Any] ) -> Dict:
return FSMTTokenizer.from_pretrained(lowercase_ )
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Any:
lowercase__ : Dict = FSMTForConditionalGeneration.from_pretrained(lowercase_ ).to(lowercase_ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
] )
@slow
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] ) -> str:
lowercase__ : int = F'''facebook/wmt19-{pair}'''
lowercase__ : Optional[int] = self.get_tokenizer(lowercase_ )
lowercase__ : Any = self.get_model(lowercase_ )
lowercase__ : List[Any] = bleu_data[pair]['src']
lowercase__ : Optional[Any] = bleu_data[pair]['tgt']
lowercase__ : int = tokenizer(lowercase_ , return_tensors="pt" , truncation=lowercase_ , padding="longest" ).to(lowercase_ )
lowercase__ : int = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowercase__ : Any = tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase__ : Optional[int] = calculate_bleu(lowercase_ , lowercase_ )
print(lowercase_ )
self.assertGreaterEqual(scores["bleu"] , lowercase_ )
| 368 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_ ( __A ):
__A : List[str] = "vit_mae"
def __init__( self : List[Any] , lowercase_ : List[Any]=7_68 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Optional[Any]=30_72 , lowercase_ : str="gelu" , lowercase_ : Tuple=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=0.02 , lowercase_ : int=1E-12 , lowercase_ : Tuple=2_24 , lowercase_ : Any=16 , lowercase_ : Dict=3 , lowercase_ : List[Any]=True , lowercase_ : Dict=16 , lowercase_ : List[str]=5_12 , lowercase_ : Tuple=8 , lowercase_ : Any=20_48 , lowercase_ : int=0.75 , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ) -> Optional[Any]:
super().__init__(**lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : List[Any] = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Any = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[int] = patch_size
lowercase__ : Any = num_channels
lowercase__ : str = qkv_bias
lowercase__ : Optional[Any] = decoder_num_attention_heads
lowercase__ : Any = decoder_hidden_size
lowercase__ : Any = decoder_num_hidden_layers
lowercase__ : Union[str, Any] = decoder_intermediate_size
lowercase__ : int = mask_ratio
lowercase__ : Tuple = norm_pix_loss
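# Example (upstream this class is ViTMAEConfig): a configuration that masks
# 90% of the image patches during pretraining instead of the default 75%.
#
#   config = ViTMAEConfig(mask_ratio=0.9)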
| 333 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase : int = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
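# The payoff of the _LazyModule wiring above is that importing the package stays
# cheap: the torch/TF branches only load on first attribute access. A minimal
# sketch of the idea (hypothetical, far simpler than the real helper):
import importlib
import types

class MiniLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # e.g. {"RagConfig": "transformers.models.rag.configuration_rag"}

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the submodule import happens only once
        return value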
| 369 | def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
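# Quick sanity checks for the helpers above: gcd(12, 18) is 6, and since
# 3 * 7 = 21 ≡ 1 (mod 10), the modular inverse of 3 mod 10 is 7.
if __name__ == "__main__":
    assert gcd(12, 18) == 6
    assert find_mod_inverse(3, 10) == 7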
| 333 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class snake_case_ :
def __init__( self : Dict , lowercase_ : Any , lowercase_ : Union[str, Any]=13 , lowercase_ : Optional[int]=7 , lowercase_ : Union[str, Any]=True , lowercase_ : str=True , lowercase_ : str=True , lowercase_ : Union[str, Any]=True , lowercase_ : Any=99 , lowercase_ : Dict=32 , lowercase_ : Tuple=2 , lowercase_ : str=4 , lowercase_ : str=37 , lowercase_ : Any="gelu" , lowercase_ : Dict=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : int=5_12 , lowercase_ : Tuple=16 , lowercase_ : str=2 , lowercase_ : List[str]=0.02 , lowercase_ : List[str]=3 , lowercase_ : Optional[int]=4 , lowercase_ : List[Any]=None , ) -> int:
lowercase__ : List[str] = parent
lowercase__ : List[str] = 13
lowercase__ : List[str] = 7
lowercase__ : List[Any] = True
lowercase__ : Union[str, Any] = True
lowercase__ : int = True
lowercase__ : str = True
lowercase__ : Dict = 99
lowercase__ : int = 3_84
lowercase__ : int = 2
lowercase__ : Dict = 4
lowercase__ : str = 37
lowercase__ : Optional[int] = "gelu"
lowercase__ : List[str] = 0.1
lowercase__ : Optional[int] = 0.1
lowercase__ : str = 5_12
lowercase__ : Optional[Any] = 16
lowercase__ : Dict = 2
lowercase__ : Tuple = 0.02
lowercase__ : Any = 3
lowercase__ : str = 4
lowercase__ : Union[str, Any] = 1_28
lowercase__ : str = 2
lowercase__ : Any = 9
lowercase__ : Tuple = 1
lowercase__ : Dict = None
def __UpperCamelCase ( self : Optional[int] ) -> int:
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : int = None
if self.use_input_mask:
lowercase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : str = None
if self.use_token_type_ids:
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Optional[int] = None
lowercase__ : int = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : List[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : List[str] ) -> Dict:
lowercase__ : Dict = TFConvBertModel(config=_lowercase )
lowercase__ : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowercase__ : Any = [input_ids, input_mask]
lowercase__ : List[Any] = model(_lowercase )
lowercase__ : str = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : List[str] ) -> Tuple:
lowercase__ : Dict = TFConvBertForMaskedLM(config=_lowercase )
lowercase__ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowercase__ : Any = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : List[Any] ) -> Optional[Any]:
lowercase__ : Tuple = self.num_labels
lowercase__ : int = TFConvBertForSequenceClassification(config=_lowercase )
lowercase__ : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowercase__ : Optional[int] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : List[str] ) -> Union[str, Any]:
lowercase__ : Tuple = self.num_choices
lowercase__ : Tuple = TFConvBertForMultipleChoice(config=_lowercase )
lowercase__ : Any = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : List[Any] = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Any = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Union[str, Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowercase__ : Union[str, Any] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[Any] ) -> str:
lowercase__ : Tuple = self.num_labels
lowercase__ : Tuple = TFConvBertForTokenClassification(config=_lowercase )
lowercase__ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowercase__ : List[str] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Tuple ) -> Union[str, Any]:
lowercase__ : int = TFConvBertForQuestionAnswering(config=_lowercase )
lowercase__ : Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowercase__ : str = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class snake_case_ ( _lowerCAmelCase ,_lowerCAmelCase ,unittest.TestCase ):
__A : Optional[Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A : str = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A : Dict = False
__A : List[Any] = False
__A : str = False
def __UpperCamelCase ( self : Optional[int] ) -> int:
lowercase__ : Optional[int] = TFConvBertModelTester(self )
lowercase__ : List[str] = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Dict ) -> List[Any]:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __UpperCamelCase ( self : Dict ) -> Dict:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def __UpperCamelCase ( self : Any ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def __UpperCamelCase ( self : List[Any] ) -> int:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def __UpperCamelCase ( self : str ) -> List[Any]:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def __UpperCamelCase ( self : int ) -> List[str]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
@slow
def __UpperCamelCase ( self : Dict ) -> Any:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = True
lowercase__ : List[Any] = True
if hasattr(_lowercase , "use_cache" ):
lowercase__ : Any = True
lowercase__ : Any = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
lowercase__ : Optional[Any] = getattr(self.model_tester , "key_length" , _lowercase )
for model_class in self.all_model_classes:
lowercase__ : Any = self._prepare_for_class(_lowercase , _lowercase )
lowercase__ : Any = model_class(_lowercase )
lowercase__ : Union[str, Any] = len(model(_lowercase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowercase , saved_model=_lowercase )
lowercase__ : Dict = os.path.join(_lowercase , "saved_model" , "1" )
lowercase__ : Any = tf.keras.models.load_model(_lowercase )
lowercase__ : int = model(_lowercase )
if self.is_encoder_decoder:
lowercase__ : Optional[Any] = outputs["encoder_hidden_states"]
lowercase__ : Optional[Any] = outputs["encoder_attentions"]
else:
lowercase__ : Any = outputs["hidden_states"]
lowercase__ : Tuple = outputs["attentions"]
self.assertEqual(len(_lowercase ) , _lowercase )
lowercase__ : Dict = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __UpperCamelCase ( self : str ) -> str:
lowercase__ : Tuple = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_lowercase )
def __UpperCamelCase ( self : Dict ) -> List[str]:
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[int] = True
lowercase__ : Tuple = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
lowercase__ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
lowercase__ : Tuple = getattr(self.model_tester , "key_length" , _lowercase )
lowercase__ : List[Any] = getattr(self.model_tester , "key_length" , _lowercase )
def check_decoder_attentions_output(lowercase_ : Union[str, Any] ):
lowercase__ : Optional[Any] = len(_lowercase )
self.assertEqual(out_len % 2 , 0 )
lowercase__ : List[str] = outputs.decoder_attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowercase_ : int ):
lowercase__ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowercase__ : int = True
lowercase__ : Dict = False
lowercase__ : int = model_class(_lowercase )
lowercase__ : Any = model(self._prepare_for_class(_lowercase , _lowercase ) )
lowercase__ : Tuple = len(_lowercase )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
if self.is_encoder_decoder:
lowercase__ : int = model_class(_lowercase )
lowercase__ : List[Any] = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_decoder_attentions_output(_lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowercase__ : Tuple = True
lowercase__ : List[Any] = model_class(_lowercase )
lowercase__ : str = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
# Check attention is always last and order is fine
lowercase__ : int = True
lowercase__ : Optional[Any] = True
lowercase__ : int = model_class(_lowercase )
lowercase__ : Optional[Any] = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowercase ) )
self.assertEqual(model.config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
@require_tf
class snake_case_ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
lowercase__ : Dict = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
lowercase__ : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase__ : Optional[Any] = model(_lowercase )[0]
lowercase__ : int = [1, 6, 7_68]
self.assertEqual(output.shape , _lowercase )
lowercase__ : Optional[int] = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1E-4 )
| 370 | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
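# For example, with the defaults a 250-word document yields three passages of
# at most 100 words; on a toy string with n=2:
#
#   split_text("a b c d e", n=2)  ->  ["a b", "c d", "e"]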
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt")["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments"):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
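# Hypothetical follow-up helper (not part of the original script): retrieve the
# k nearest passages for a question from the index built in `main`. The model
# names below are the standard DPR question-encoder checkpoints.
def query_index(passages_path: str, index_path: str, question: str, k: int = 5):
    from datasets import load_from_disk
    from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

    dataset = load_from_disk(passages_path)
    dataset.load_faiss_index("embeddings", index_path)
    q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].detach().numpy()
    scores, passages = dataset.get_nearest_examples("embeddings", question_emb, k=k)
    return scores, passages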
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 333 | 0 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
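    # Note: model.predict returns values in the scaled [0, 1] range. To read
    # them as prices, keep the fitted scaler instead of calling fit_transform
    # inline, then invert it (sketch, with a hypothetical `raw_values` array):
    #
    #   scaler = MinMaxScaler().fit(raw_values)
    #   prices = scaler.inverse_transform(pred.reshape(-1, 1))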
| 371 | import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
UpperCamelCase = parser.parse_args()
zeller(args.date_input)
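    # Worked example for "01-31-2010": January maps to m=13 with y=2009, so
    # c=20, k=9, t=int(2.6*13 - 5.39)=28, u=5, v=2, x=31+9=40, z=75,
    # w=75-40=35 and f=35 % 7 = 0, i.e. "Sunday" -- which matches the calendar.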
| 333 | 0 |
"""simple docstring"""
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
| 350 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCamelCase = 4
UpperCamelCase = 3
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str]):
for shard in shards:
for i in range(_lowerCamelCase):
yield {"i": i, "shard": shard}
def lowercase_ ( ):
lowercase__ : List[str] = int(os.environ["RANK"])
lowercase__ : Union[str, Any] = int(os.environ["WORLD_SIZE"])
lowercase__ : Union[str, Any] = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCamelCase)
parser.add_argument("--local_rank" , type=_lowerCamelCase)
parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0)
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Dict = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(_lowerCamelCase)]}
lowercase__ : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase)
if not streaming:
lowercase__ : str = Dataset.from_list(list(_lowerCamelCase))
lowercase__ : List[str] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase)
lowercase__ : Any = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase)
lowercase__ : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : Any = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
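# Each rank gets floor(full_size / world_size) items, plus one extra on the first
# full_size % world_size ranks (here full_size = 4 shards x 3 items = 12).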
lowercase__ : List[str] = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
if __name__ == "__main__":
main()
| 333 | 0 |
def lowercase_ ( _lowerCamelCase : str):
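# Convert a (possibly sign-prefixed) hexadecimal string to its binary representation,
# returned as an int made of binary digits.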
lowercase__ : Optional[int] = hex_num.strip()
if not hex_num:
raise ValueError("No value was passed to the function")
lowercase__ : Any = hex_num[0] == "-"
if is_negative:
lowercase__ : Any = hex_num[1:]
try:
lowercase__ : int = int(hex_num , 16)
except ValueError:
raise ValueError("Invalid value was passed to the function")
lowercase__ : List[Any] = ""
while int_num > 0:
lowercase__ : List[Any] = str(int_num % 2) + bin_str
int_num >>= 1
return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case_ ( __A ):
__A : List[str] = "unispeech"
def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : Any = feat_extract_norm
lowercase__ : Optional[Any] = feat_extract_activation
lowercase__ : Dict = list(lowercase_ )
lowercase__ : Union[str, Any] = list(lowercase_ )
lowercase__ : List[str] = list(lowercase_ )
lowercase__ : List[str] = conv_bias
lowercase__ : Any = num_conv_pos_embeddings
lowercase__ : Dict = num_conv_pos_embedding_groups
lowercase__ : int = len(self.conv_dim )
lowercase__ : str = num_hidden_layers
lowercase__ : Any = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout
lowercase__ : Any = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Any = feat_proj_dropout
lowercase__ : str = final_dropout
lowercase__ : int = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = num_ctc_classes
lowercase__ : int = vocab_size
lowercase__ : str = do_stable_layer_norm
lowercase__ : Any = use_weighted_layer_sum
lowercase__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any] = apply_spec_augment
lowercase__ : Dict = mask_time_prob
lowercase__ : Tuple = mask_time_length
lowercase__ : str = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : int = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[int] = num_codevectors_per_group
lowercase__ : List[str] = num_codevector_groups
lowercase__ : Dict = contrastive_logits_temperature
lowercase__ : Tuple = feat_quantizer_dropout
lowercase__ : Any = num_negatives
lowercase__ : Dict = codevector_dim
lowercase__ : Tuple = proj_codevector_dim
lowercase__ : List[str] = diversity_loss_weight
# ctc loss
lowercase__ : Tuple = ctc_loss_reduction
lowercase__ : Dict = ctc_zero_infinity
# pretraining loss
lowercase__ : Optional[Any] = replace_prob
@property
def __UpperCamelCase ( self : Dict ) -> Tuple:
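# Total downsampling factor of the conv feature extractor: the product of all strides,
# e.g. 5*2*2*2*2*2*2 = 320 input samples per output frame.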
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 333 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Any):
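# Pad or truncate every sequence to exactly sequence_length with padding_value,
# honouring padding_side; a tuple padding_value signals 2-D (span) entries.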
if isinstance(padding_value , tuple):
lowercase__ : Union[str, Any] = np.full((len(sequences), sequence_length, 2) , padding_value)
else:
lowercase__ : str = np.full((len(sequences), sequence_length) , padding_value)
for i, tensor in enumerate(sequences):
if padding_side == "right":
if isinstance(padding_value , tuple):
lowercase__ : Any = tensor[:sequence_length]
else:
lowercase__ : List[Any] = tensor[:sequence_length]
else:
if isinstance(padding_value , tuple):
lowercase__ : Dict = tensor[:sequence_length]
else:
lowercase__ : int = tensor[:sequence_length]
return out_tensor.tolist()
def lowercase_ ( _lowerCamelCase : Optional[Any]):
lowercase__ : str = ord(char)
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
lowercase__ : List[str] = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
@dataclass
class snake_case_ ( __lowercase ):
__A : PreTrainedTokenizerBase
__A : Union[bool, str, PaddingStrategy] = True
__A : Optional[int] = None
__A : Optional[int] = None
__A : int = -100
__A : str = "pt"
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> Any:
import torch
lowercase__ : Union[str, Any] = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowercase__ : Tuple = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase__ : Union[str, Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
lowercase__ : List[str] = torch.tensor(batch["entity_ids"] ).shape[1]
lowercase__ : Tuple = self.tokenizer.padding_side
if padding_side == "right":
lowercase__ : Union[str, Any] = [
list(snake_case_ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case_ )) for label in labels
]
else:
lowercase__ : Dict = [
[self.label_pad_token_id] * (sequence_length - len(snake_case_ )) + list(snake_case_ ) for label in labels
]
lowercase__ : Dict = [feature['''ner_tags'''] for feature in features]
lowercase__ : Optional[Any] = padding_tensor(snake_case_ , -1 , snake_case_ , snake_case_ )
lowercase__ : Union[str, Any] = [feature['''original_entity_spans'''] for feature in features]
lowercase__ : Optional[int] = padding_tensor(snake_case_ , (-1, -1) , snake_case_ , snake_case_ )
lowercase__ : List[str] = {k: torch.tensor(snake_case_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 352 | def lowercase_ ( _lowerCamelCase : list):
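# Cocktail shaker sort: a bidirectional bubble sort; each pass bubbles the largest
# remaining element right, then the smallest left, shrinking the window every iteration.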
for i in range(len(_lowerCamelCase) - 1 , 0 , -1):
lowercase__ : int = False
for j in range(i , 0 , -1):
if unsorted[j] < unsorted[j - 1]:
lowercase__ , lowercase__ : int = unsorted[j - 1], unsorted[j]
lowercase__ : List[str] = True
for j in range(i):
if unsorted[j] > unsorted[j + 1]:
lowercase__ , lowercase__ : Optional[int] = unsorted[j + 1], unsorted[j]
lowercase__ : Dict = True
if not swapped:
break
return unsorted
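# e.g. cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]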
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case_ ( __A ):
__A : List[Any] = ["image_processor", "tokenizer"]
__A : Tuple = "ViTImageProcessor"
__A : int = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowercase_ : Tuple=None , lowercase_ : List[str]=None , **lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
lowercase__ : Union[str, Any] = kwargs.pop("feature_extractor" )
lowercase__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self : int , lowercase_ : Dict=None , lowercase_ : Optional[int]=None , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , **lowercase_ : Tuple ) -> Optional[int]:
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
lowercase__ : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if visual_prompt is not None:
lowercase__ : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
lowercase__ : Optional[int] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if visual_prompt is not None and images is not None:
lowercase__ : Any = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
lowercase__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
lowercase__ : int = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def __UpperCamelCase ( self : Optional[int] , *lowercase_ : List[str] , **lowercase_ : Dict ) -> Tuple:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : List[Any] , *lowercase_ : str , **lowercase_ : List[Any] ) -> Optional[Any]:
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def __UpperCamelCase ( self : Any ) -> Optional[int]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self : str ) -> List[Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
return self.image_processor
| 353 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
__A : int = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ : Any = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
lowercase__ : str = self._feature_file(lowercase_ )
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
"""Compute validation""" ""
lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : int = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : List[Any] = self(**lowercase_ )
lowercase__ , lowercase__ : Any = outputs[:2]
lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
lowercase__ : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowercase__ : Any = dict(enumerate(self.labels ) )
lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ : Any = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
lowercase__ : List[Any] = dict(results.items() )
lowercase__ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
# when stable
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
lowercase__ : Any = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
# updating to test_epoch_end instead of deprecated test_end
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ : Optional[int] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase = parser.parse_args()
UpperCamelCase = NERTransformer(args)
UpperCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 333 | 0 |
from __future__ import annotations
UpperCamelCase = 1.60_21E-19 # units = C
def lowercase_ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , ):
if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative")
elif mobility < 0:
raise ValueError("mobility cannot be negative")
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
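# Worked example: solving sigma = q * n * mu with conductivity=0, electron_conc=25 and
# mobility=100 returns ("conductivity", 25 * 100 * 1.6021e-19) ~= ("conductivity", 4.005e-16).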
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 333 | 0 |
import logging
from transformers import PretrainedConfig
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class snake_case_ ( UpperCamelCase__ ):
__A : List[Any] = '''bertabs'''
def __init__( self : List[str] , lowercase_ : Dict=3_05_22 , lowercase_ : Union[str, Any]=5_12 , lowercase_ : Optional[int]=6 , lowercase_ : int=5_12 , lowercase_ : Optional[int]=8 , lowercase_ : List[Any]=5_12 , lowercase_ : Tuple=0.2 , lowercase_ : List[Any]=6 , lowercase_ : Tuple=7_68 , lowercase_ : List[Any]=8 , lowercase_ : Union[str, Any]=20_48 , lowercase_ : str=0.2 , **lowercase_ : Any , ) -> List[str]:
super().__init__(**UpperCamelCase_ )
lowercase__ : Tuple = vocab_size
lowercase__ : Optional[int] = max_pos
lowercase__ : List[str] = enc_layers
lowercase__ : List[Any] = enc_hidden_size
lowercase__ : Tuple = enc_heads
lowercase__ : Any = enc_ff_size
lowercase__ : Dict = enc_dropout
lowercase__ : List[str] = dec_layers
lowercase__ : List[str] = dec_hidden_size
lowercase__ : List[str] = dec_heads
lowercase__ : Optional[Any] = dec_ff_size
lowercase__ : int = dec_dropout
| 355 | # Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowercase_ ( _lowerCamelCase : List[str]):
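# The logistic (sigmoid) function squashes any real score z into a probability in (0, 1).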
return 1 / (1 + np.exp(-z))
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
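# Binary cross-entropy averaged over the batch: y holds the 0/1 labels, h the predicted probabilities.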
return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple):
lowercase__ : Union[str, Any] = np.dot(x , weights)
return np.sum(y * scores - np.log(1 + np.exp(scores)))
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : str=7_0000):
lowercase__ : Optional[int] = np.zeros(x.shape[1])
for iterations in range(_lowerCamelCase):
lowercase__ : Union[str, Any] = np.dot(x , theta)
lowercase__ : Tuple = sigmoid_function(z)
lowercase__ : Dict = np.dot(x.T , h - y) / y.size
lowercase__ : int = theta - alpha * gradient  # updating the weights
lowercase__ : List[str] = np.dot(x , theta)
lowercase__ : Union[str, Any] = sigmoid_function(z)
lowercase__ : Optional[Any] = cost_function(h , y)
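# Plain batch gradient descent: each iteration steps theta against the cross-entropy
# gradient, scaled by the learning rate alpha, then re-evaluates the loss.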
if iterations % 100 == 0:
print(f'''loss: {j} \t''') # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
UpperCamelCase = datasets.load_iris()
UpperCamelCase = iris.data[:, :2]
UpperCamelCase = (iris.target != 0) * 1
UpperCamelCase = 0.1
UpperCamelCase = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def lowercase_ ( _lowerCamelCase : List[Any]):
return sigmoid_function(
np.dot(x , theta))  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((UpperCamelCase) , (UpperCamelCase)) = (x[:, 0].min(), x[:, 0].max())
((UpperCamelCase) , (UpperCamelCase)) = (x[:, 1].min(), x[:, 1].max())
((UpperCamelCase) , (UpperCamelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
UpperCamelCase = np.c_[xxa.ravel(), xxa.ravel()]
UpperCamelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 333 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCamelCase = 'Create a default config file for Accelerate with only a few flags set.'
def lowercase_ ( _lowerCamelCase : Optional[int]="no" , _lowerCamelCase : str = default_json_config_file , _lowerCamelCase : bool = False):
lowercase__ : List[str] = Path(lowerCamelCase_)
path.parent.mkdir(parents=lowerCamelCase_ , exist_ok=lowerCamelCase_)
if path.exists():
print(
f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''')
return False
lowercase__ : Optional[int] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''')
lowercase__ : Union[str, Any] = {
'''compute_environment''': '''LOCAL_MACHINE''',
'''mixed_precision''': mixed_precision,
}
if torch.cuda.is_available():
lowercase__ : Optional[int] = torch.cuda.device_count()
lowercase__ : Tuple = num_gpus
lowercase__ : List[Any] = False
if num_gpus > 1:
lowercase__ : Any = '''MULTI_GPU'''
else:
lowercase__ : Optional[int] = '''NO'''
elif is_xpu_available() and use_xpu:
lowercase__ : Any = torch.xpu.device_count()
lowercase__ : str = num_xpus
lowercase__ : Union[str, Any] = False
if num_xpus > 1:
lowercase__ : Optional[int] = '''MULTI_XPU'''
else:
lowercase__ : List[str] = '''NO'''
elif is_npu_available():
lowercase__ : List[str] = torch.npu.device_count()
lowercase__ : Optional[int] = num_npus
lowercase__ : Union[str, Any] = False
if num_npus > 1:
lowercase__ : Tuple = '''MULTI_NPU'''
else:
lowercase__ : Any = '''NO'''
else:
lowercase__ : Dict = 0
lowercase__ : Dict = True
lowercase__ : Tuple = 1
lowercase__ : int = '''NO'''
lowercase__ : List[str] = ClusterConfig(**lowerCamelCase_)
config.to_json_file(lowerCamelCase_)
return path
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any]):
lowercase__ : Any = parser.add_parser("default" , parents=lowerCamelCase_ , help=lowerCamelCase_ , formatter_class=lowerCamelCase_)
parser.add_argument(
"--config_file" , default=lowerCamelCase_ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have "
"such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed "
"with \'huggingface\'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=lowerCamelCase_ , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=lowerCamelCase_)
return parser
def lowercase_ ( _lowerCamelCase : Dict):
lowercase__ : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location)
if config_file:
print(f'''accelerate configuration saved at {config_file}''')
| 356 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class snake_case_ ( __A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__A : ClassVar[Features] = Features({"text": Value("string" )} )
__A : ClassVar[Features] = Features({"labels": ClassLabel} )
__A : str = "text"
__A : str = "labels"
def __UpperCamelCase ( self : Dict , lowercase_ : Optional[Any] ) -> int:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
lowercase__ : Optional[int] = copy.deepcopy(self )
lowercase__ : Tuple = self.label_schema.copy()
lowercase__ : Union[str, Any] = features[self.label_column]
lowercase__ : int = label_schema
return task_template
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 333 | 0 |
import os
from distutils.util import strtobool
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]):
for e in env_keys:
lowercase__ : int = int(os.environ.get(e , -1))
if val >= 0:
return val
return default
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int=False):
lowercase__ : Dict = os.environ.get(key , str(default))
return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple="no"):
lowercase__ : Optional[Any] = os.environ.get(key , str(default))
return value
| 357 | def lowercase_ ( _lowerCamelCase : int = 10 , _lowerCamelCase : int = 1000 , _lowerCamelCase : bool = True):
assert (
isinstance(_lowerCamelCase , _lowerCamelCase)
and isinstance(_lowerCamelCase , _lowerCamelCase)
and isinstance(_lowerCamelCase , _lowerCamelCase)
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
return min_val if option else max_val
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
return int((number_a + number_a) / 2)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int):
assert (
isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase)
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)")
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value")
def answer(_lowerCamelCase : int) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started...")
lowercase__ : Optional[int] = lower
lowercase__ : List[Any] = higher
lowercase__ : Dict = []
while True:
lowercase__ : Any = get_avg(_lowerCamelCase , _lowerCamelCase)
last_numbers.append(_lowerCamelCase)
if answer(_lowerCamelCase) == "low":
lowercase__ : List[str] = number
elif answer(_lowerCamelCase) == "high":
lowercase__ : Optional[int] = number
else:
break
print(f'''guess the number : {last_numbers[-1]}''')
print(f'''details : {last_numbers!s}''')
def lowercase_ ( ):
lowercase__ : Tuple = int(input("Enter lower value : ").strip())
lowercase__ : Optional[int] = int(input("Enter high value : ").strip())
lowercase__ : Optional[Any] = int(input("Enter value to guess : ").strip())
guess_the_number(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
if __name__ == "__main__":
main()
| 333 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( lowerCamelCase__ ,unittest.TestCase ):
__A : Union[str, Any] = CodeGenTokenizer
__A : Tuple = CodeGenTokenizerFast
__A : Any = True
__A : Any = {"add_prefix_space": True}
__A : List[str] = False
def __UpperCamelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Optional[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
lowercase__ : Any = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowercase__ : int = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowercase__ : Tuple = {'unk_token': '<unk>'}
lowercase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__snake_case ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__snake_case ) )
def __UpperCamelCase ( self : str , **lowercase_ : str ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def __UpperCamelCase ( self : List[Any] , **lowercase_ : str ) -> Tuple:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case )
def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> List[str]:
lowercase__ : Any = 'lower newer'
lowercase__ : Any = 'lower newer'
return input_text, output_text
def __UpperCamelCase ( self : Tuple ) -> int:
lowercase__ : Optional[int] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : str = 'lower newer'
lowercase__ : Optional[int] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowercase__ : str = tokenizer.tokenize(__snake_case , add_prefix_space=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
lowercase__ : Optional[int] = tokens + [tokenizer.unk_token]
lowercase__ : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
lowercase__ : int = self.get_tokenizer()
lowercase__ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__snake_case )
lowercase__ : List[Any] = 'lower newer'
# Testing tokenization
lowercase__ : List[Any] = tokenizer.tokenize(__snake_case , add_prefix_space=__snake_case )
lowercase__ : Union[str, Any] = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing conversion to ids without special tokens
lowercase__ : List[str] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
lowercase__ : Dict = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing conversion to ids with special tokens
lowercase__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__snake_case )
lowercase__ : str = tokenizer.encode(__snake_case , add_prefix_space=__snake_case )
lowercase__ : int = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing the unknown token
lowercase__ : Optional[int] = tokens + [rust_tokenizer.unk_token]
lowercase__ : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def __UpperCamelCase ( self : int , *lowercase_ : Dict , **lowercase_ : List[str] ) -> Tuple:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Union[str, Any]=15 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : str = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
# Simple input
lowercase__ : int = 'This is a simple input'
lowercase__ : Dict = ['This is a simple input 1', 'This is a simple input 2']
lowercase__ : Optional[Any] = ('This is a simple input', 'This is a pair')
lowercase__ : List[Any] = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding="max_length" )
# Simple input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding="max_length" )
# Simple input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding="max_length" , )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding="max_length" )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding="max_length" )
# Pair input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding="max_length" , )
def __UpperCamelCase ( self : int ) -> Optional[int]:
lowercase__ : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Dict = 'This is a simple input'
lowercase__ : Optional[Any] = ['This is a simple input looooooooong', 'This is a simple input']
lowercase__ : Dict = ('This is a simple input', 'This is a pair')
lowercase__ : Optional[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
lowercase__ : Union[str, Any] = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(__snake_case , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : int = tokenizer(__snake_case , padding=__snake_case , truncate=__snake_case , return_tensors="np" )
lowercase__ : Dict = tokenizer(*__snake_case , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[Any] = tokenizer(__snake_case , padding=__snake_case , truncate=__snake_case , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __UpperCamelCase ( self : int ) -> Dict:
lowercase__ : int = '$$$'
lowercase__ : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__snake_case , add_bos_token=__snake_case )
lowercase__ : Optional[int] = 'This is a simple input'
lowercase__ : Optional[Any] = ['This is a simple input 1', 'This is a simple input 2']
lowercase__ : Tuple = tokenizer.bos_token_id
lowercase__ : int = tokenizer(__snake_case )
lowercase__ : Any = tokenizer(__snake_case )
self.assertEqual(out_s.input_ids[0] , __snake_case )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : Optional[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __snake_case )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase__ : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowercase__ : str = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
lowercase__ : Optional[int] = '\nif len_a > len_b: result = a\nelse: result = b'
lowercase__ : Dict = tokenizer.encode(__snake_case )
lowercase__ : Optional[int] = ['^#', re.escape("<|endoftext|>" ), '^\'\'\'', '^"""', '\n\n\n']
lowercase__ : Any = tokenizer.decode(__snake_case , truncate_before_pattern=__snake_case )
self.assertEqual(__snake_case , __snake_case )
def __UpperCamelCase ( self : List[str] ) -> int:
pass
| 358 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | 0 |
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple):
return x if y == 0 else greatest_common_divisor(y , x % y)
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : int):
return (x * y) // greatest_common_divisor(x , y)
def lowercase_ ( _lowerCamelCase : Union[str, Any] = 20):
lowercase__ : List[Any] = 1
for i in range(1 , n + 1):
lowercase__ : Dict = lcm(g , i)
return g
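# e.g. solution(10) == 2520, the smallest positive number evenly divisible by all of 1..10.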
if __name__ == "__main__":
print(f"{solution() = }")
| 359 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
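# Gradient-sync sanity check: while accumulating under no_sync the two model copies'
# grads must differ; once a sync step has happened they must match elementwise.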
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True):
model.train()
lowercase__ : Tuple = model(_lowerCamelCase)
lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False):
set_seed(42)
lowercase__ : Dict = RegressionModel()
lowercase__ : int = deepcopy(_lowerCamelCase)
lowercase__ : str = RegressionDataset(length=80)
lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16)
model.to(accelerator.device)
if sched:
lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3)
lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3)
lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
# Make a copy of `model`
if sched:
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowercase_ ( _lowerCamelCase : Tuple):
# Test when on a single CPU or GPU that the context manager does nothing
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Any):
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False):
lowercase__ : int = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
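# For reference, a hedged sketch of the `accumulate` API exercised above; with
# `gradient_accumulation_steps=2` the wrapped optimizer only really steps every
# second batch. `model`, `optimizer` and `dataloader` are placeholder names.
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = model(**batch).loss
#           accelerator.backward(loss)
#           optimizer.step()
#           optimizer.zero_grad()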
def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False):
lowercase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
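# Hedged note on the scheduler handling above: without `split_batches`, the
# ground-truth scheduler is stepped once per process because the prepared
# `ddp_sched` advances its underlying scheduler `num_processes` times per
# call; with `split_batches=True` every process sees the same batch, so a
# single step matches.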
def lowercase_ ( ):
lowercase__ : List[str] = Accelerator()
lowercase__ : List[Any] = RegressionDataset(length=80)
lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ : int = RegressionDataset(length=96)
lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if iteration < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if batch_num < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
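# Hedged note: the nested loops above check that `GradientState` keeps a stack
# of active dataloaders -- the inner dataloader becomes `active_dataloader`
# while it iterates, the outer one is restored afterwards, and the attribute
# returns to `None` once all loops finish.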
def lowercase_ ( ):
lowercase__ : str = Accelerator()
lowercase__ : Dict = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
test_noop_sync(_lowerCamelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
test_distributed_sync(_lowerCamelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase)
    # Currently will break on torch 2.0+, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 333 | 0 |
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase_ ( _lowerCamelCase : Tuple="ro" , _lowerCamelCase : Optional[int]="en" , _lowerCamelCase : Any="wmt16" , _lowerCamelCase : Tuple=None):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets")
lowercase__ : Optional[int] = f'''{src_lang}-{tgt_lang}'''
print(f'''Converting {dataset}-{pair}''')
lowercase__ : Optional[Any] = datasets.load_dataset(_lowerCamelCase , _lowerCamelCase)
if save_dir is None:
lowercase__ : Union[str, Any] = f'''{dataset}-{pair}'''
lowercase__ : Union[str, Any] = Path(_lowerCamelCase)
save_dir.mkdir(exist_ok=_lowerCamelCase)
for split in ds.keys():
print(f'''Splitting {split} with {ds[split].num_rows} records''')
# to save to val.source, val.target like summary datasets
lowercase__ : Tuple = "val" if split == "validation" else split
lowercase__ : List[str] = save_dir.joinpath(f'''{fn}.source''')
lowercase__ : Union[str, Any] = save_dir.joinpath(f'''{fn}.target''')
lowercase__ : Optional[int] = src_path.open("w+")
lowercase__ : Optional[Any] = tgt_path.open("w+")
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split]):
lowercase__ : Union[str, Any] = x["translation"]
src_fp.write(ex[src_lang] + "\n")
tgt_fp.write(ex[tgt_lang] + "\n")
print(f'''Saved {dataset} dataset to {save_dir}''')
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
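# Hedged CLI sketch: through `fire`, the script is driven with the original
# argument names (the signature above was renamed by the transformation), e.g.
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
#
# which writes {train,val,test}.source / .target files under the save dir.
# The script file name is illustrative.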
| 360 | import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase)
lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase)
lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
            lowercase__ : Tuple = tax_mlp_wi_0
            lowercase__ : str = tax_mlp_wi_1
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
            lowercase__ : List[str] = tax_mlp_wi_0
            lowercase__ : List[Any] = tax_mlp_wi_1
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 333 | 0 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
UpperCamelCase = parse(importlib.metadata.version('''torch'''))
def lowercase_ ( _lowerCamelCase : Union[str, Version] , _lowerCamelCase : str , _lowerCamelCase : str):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}''')
lowercase__ : Any = STR_OPERATION_TO_FUNC[operation]
if isinstance(_lowerCamelCase , _lowerCamelCase):
lowercase__ : Union[str, Any] = parse(importlib.metadata.version(_lowerCamelCase))
return operation(_lowerCamelCase , parse(_lowerCamelCase))
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : str):
return compare_versions(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
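# Hedged usage sketch under the original accelerate names (`compare_versions`,
# `is_torch_version`); the wrapper defined last takes an operation and a
# version string and checks them against the installed torch:
#
#   if is_torch_version("<", "2.0"):
#       ...  # fall back to the pre-2.0 code path
#   compare_versions("numpy", ">=", "1.20")  # works on any installed package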
| 361 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
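# Hedged usage sketch (the class above is `RwkvConfig` in the original
# transformers source); unset sizes fall back to the defaults computed in
# `__init__`:
#
#   config = RwkvConfig(vocab_size=50277, context_length=1024, hidden_size=768)
#   # attention_hidden_size defaults to hidden_size, intermediate_size to 4 * hidden_size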
| 333 | 0 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''vocab_file''': '''spiece.model'''}
UpperCamelCase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
UpperCamelCase = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class snake_case_ ( __A ):
__A : Union[str, Any] = VOCAB_FILES_NAMES
__A : str = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : int = ["input_ids", "attention_mask"]
__A : Dict = []
def __init__( self : List[str] , lowercase_ : Dict , lowercase_ : Tuple="<unk>" , lowercase_ : Dict="<s>" , lowercase_ : int="</s>" , lowercase_ : Dict="<pad>" , lowercase_ : Union[str, Any]="[SEP]" , lowercase_ : Optional[int]="[MASK]" , lowercase_ : Union[str, Any]="[CLS]" , lowercase_ : List[Any] = None , **lowercase_ : Tuple , ) -> None:
lowercase__ : Optional[int] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
lowercase__ : str = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
lowercase__ : List[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
lowercase__ : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
lowercase__ : List[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
lowercase__ : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
lowercase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , sep_token=__a , mask_token=__a , cls_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
lowercase__ : Union[str, Any] = vocab_file
lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__a )
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
return self.sp_model.get_piece_size()
def __UpperCamelCase ( self : Tuple ) -> Any:
lowercase__ : Any = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ) -> List[str]:
lowercase__ : List[str] = self.__dict__.copy()
lowercase__ : Union[str, Any] = None
return state
def __setstate__( self : Union[str, Any] , lowercase_ : Optional[Any] ) -> Union[str, Any]:
lowercase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ : Optional[Any] = {}
lowercase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Optional[int] ) -> List[str]:
return self.sp_model.encode(__a , out_type=__a )
def __UpperCamelCase ( self : Tuple , lowercase_ : str ) -> Union[str, Any]:
return self.sp_model.piece_to_id(__a )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] ) -> List[Any]:
lowercase__ : Optional[int] = self.sp_model.IdToPiece(__a )
return token
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[int] ) -> Dict:
lowercase__ : List[str] = []
lowercase__ : Union[str, Any] = ""
lowercase__ : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
lowercase__ : Optional[Any] = True
lowercase__ : int = []
else:
current_sub_tokens.append(__a )
lowercase__ : List[str] = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Any = False , lowercase_ : Any = None , lowercase_ : str = True , **lowercase_ : str , ) -> str:
lowercase__ : Dict = kwargs.pop("use_source_tokenizer" , __a )
lowercase__ : Optional[int] = self.convert_ids_to_tokens(__a , skip_special_tokens=__a )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowercase__ : Tuple = []
lowercase__ : Tuple = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__a ) )
lowercase__ : Union[str, Any] = []
sub_texts.append(__a )
else:
current_sub_text.append(__a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowercase__ : Optional[int] = re.sub(R" (\[(MASK|SEP)\])" , R"\1" , " ".join(__a ) )
else:
lowercase__ : Optional[Any] = "".join(__a )
lowercase__ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowercase__ : Any = self.clean_up_tokenization(__a )
return clean_text
else:
return text
def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : List[Any] = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ : Any = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
lowercase__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
def __UpperCamelCase ( self : Tuple , lowercase_ : Dict , lowercase_ : Any = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ : int = [self.cls_token_id]
lowercase__ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : str , lowercase_ : Tuple , lowercase_ : List[Any] = None , lowercase_ : Dict = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Union[str, Any] = None ) -> List[int]:
lowercase__ : List[Any] = [self.sep_token_id]
lowercase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
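# Hedged usage sketch (the class above is `BigBirdTokenizer` in the original
# transformers source):
#
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tok("Hello world").input_ids  # [CLS] <piece ids> [SEP], via sentencepiece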
| 362 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
edges.sort(key=lambda lowercase_ : e[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ : Tuple = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
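# Hedged usage sketch. The methods above were name-mangled by the
# transformation (`__UpperCamelCase`); under their original names
# (`Graph.build`, `boruvka`, ...) the algorithm would be driven like this:
#
#   g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 1), (1, 3, 4)])
#   mst = Graph.boruvka(g)  # Graph holding a minimum spanning forest
#   print(mst)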
| 333 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''PoolFormerFeatureExtractor''']
UpperCamelCase = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 363 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 333 | 0 |
import warnings
from functools import wraps
from typing import Callable
def lowercase_ ( _lowerCamelCase : Callable) -> List[Any]:
@wraps(SCREAMING_SNAKE_CASE_)
def _inner_fn(*_lowerCamelCase : int , **_lowerCamelCase : str):
warnings.warn(
(f'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , SCREAMING_SNAKE_CASE_ , )
return fn(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
return _inner_fn
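# Hedged usage sketch (the decorator above is `experimental` in the original
# huggingface_hub source):
#
#   @experimental
#   def new_feature(x):
#       return x * 2
#
#   new_feature(3)  # warns that the API may change, then returns 6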
| 364 | def lowercase_ ( _lowerCamelCase : int = 1 , _lowerCamelCase : int = 1000):
lowercase__ : Union[str, Any] = 1
lowercase__ : int = 0
for divide_by_number in range(_lowerCamelCase , digit + 1):
lowercase__ : list[int] = []
lowercase__ : Dict = numerator
for _ in range(1 , digit + 1):
if now_divide in has_been_divided:
if longest_list_length < len(_lowerCamelCase):
lowercase__ : Union[str, Any] = len(_lowerCamelCase)
lowercase__ : Optional[int] = divide_by_number
else:
has_been_divided.append(_lowerCamelCase)
lowercase__ : Optional[Any] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
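# Hedged note: with the defaults (numerator=1, digit=1000) the function above
# answers Project Euler problem 26 -- the denominator below 1000 whose unit
# fraction has the longest recurring decimal cycle -- which is 983. For
# digit=10 it returns 7, since 1/7 repeats with period 6.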
| 333 | 0 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any]):
# load base model
lowercase__ : Tuple = StableDiffusionPipeline.from_pretrained(snake_case__ , torch_dtype=torch.floataa)
# load LoRA weight from .safetensors
lowercase__ : Union[str, Any] = load_file(snake_case__)
lowercase__ : int = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # the alpha is supplied up front (via the CLI flag), so `.alpha` entries can be skipped
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowercase__ : Optional[Any] = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
lowercase__ : Optional[Any] = pipeline.text_encoder
else:
lowercase__ : Tuple = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
lowercase__ : List[str] = pipeline.unet
# find the target layer
lowercase__ : Optional[Any] = layer_infos.pop(0)
while len(snake_case__) > -1:
try:
lowercase__ : Dict = curr_layer.__getattr__(snake_case__)
if len(snake_case__) > 0:
lowercase__ : Dict = layer_infos.pop(0)
elif len(snake_case__) == 0:
break
except Exception:
if len(snake_case__) > 0:
temp_name += "_" + layer_infos.pop(0)
else:
lowercase__ : Tuple = layer_infos.pop(0)
lowercase__ : List[Any] = []
if "lora_down" in key:
pair_keys.append(key.replace("lora_down" , "lora_up"))
pair_keys.append(snake_case__)
else:
pair_keys.append(snake_case__)
pair_keys.append(key.replace("lora_up" , "lora_down"))
# update weight
if len(state_dict[pair_keys[0]].shape) == 4:
lowercase__ : Dict = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.floataa)
lowercase__ : Union[str, Any] = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(snake_case__ , snake_case__).unsqueeze(2).unsqueeze(3)
else:
lowercase__ : Dict = state_dict[pair_keys[0]].to(torch.floataa)
lowercase__ : List[Any] = state_dict[pair_keys[1]].to(torch.floataa)
curr_layer.weight.data += alpha * torch.mm(snake_case__ , snake_case__)
# update visited list
for item in pair_keys:
visited.append(snake_case__)
return pipeline
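# Hedged note on the merge above: for every LoRA pair the weight update applied is
#   W <- W + alpha * (W_up @ W_down)
# (with a squeeze/unsqueeze round-trip for 4-D conv kernels), matching the
# `W = W0 + alpha * deltaW` formula quoted in the CLI help below.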
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
UpperCamelCase = parser.parse_args()
UpperCamelCase = args.base_model_path
UpperCamelCase = args.checkpoint_path
UpperCamelCase = args.dump_path
UpperCamelCase = args.lora_prefix_unet
UpperCamelCase = args.lora_prefix_text_encoder
UpperCamelCase = args.alpha
UpperCamelCase = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
UpperCamelCase = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 365 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
__A : int = StableUnCLIPPipeline
__A : int = TEXT_TO_IMAGE_PARAMS
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : int = False
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : str = 32
lowercase__ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : Any = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
lowercase__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : Dict = pipe("anime turle" , generator=lowercase_ , output_type="np" )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 366 | import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase_ ( key , default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return value
_run_slow_tests = lowercase_('''RUN_SLOW''', default=False)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def require_torch_min_version ( test_case=None , version=None):
    if test_case is None:
        return partial(require_torch_min_version , version=version)
    return unittest.skipUnless(is_torch_version(">=" , version) , f'''test requires torch version >= {version}''')(test_case)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase)
class snake_case_ ( unittest.TestCase ):
    clear_on_setup = True
    @classmethod
    def setUpClass ( cls ) -> str:
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass ( cls ) -> Optional[Any]:
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp ( self ) -> Optional[int]:
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("**/*" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class snake_case_ ( unittest.TestCase ):
    def tearDown ( self ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
    def add_mocks ( self , mocks: Union[mock.Mock, List[mock.Mock]] ) -> str:
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase_ ( tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i] , tensor):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ) -> Union[str, Any]:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream ( stream , callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False):
    if echo:
        print("\nRunning: " , " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label , line , file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line: tee(line , out , sys.stdout , label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr , lambda line: tee(line , err , sys.stderr , label="stderr:"))),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err)
def lowercase_ ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')
return result
class SubprocessCallException(Exception):
    pass
def lowercase_ ( command , return_stdout=False):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output , "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'''Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}''') from e
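# Illustrative usage (an editorial addition, not part of the original file):
# lowercase_(["echo", "accelerate"], return_stdout=True) returns "accelerate\n",
# while a non-zero exit status raises SubprocessCallException with the captured output.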
| 333 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
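# Editorial note (not part of the original file): an a x b grid contains
# T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2 is the n-th triangle
# number — hence the search below over products of pairs of triangle numbers.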
def solution ( target : int = 200_0000):
    triangle_numbers : list[int] = [0]
    idx : int
    for idx in range(1 , ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product : int = 0
    # the area corresponding to the grid that gives the product closest to target
    area : int = 0
    # an estimate of b, using the quadratic formula
    b_estimate : float
    # the largest integer less than b_estimate
    b_floor : int
    # the smallest integer greater than b_estimate
    b_ceil : int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess : int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess : int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f"{solution() = }")
| 367 | from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=DummyObject ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=DummyObject ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
| 333 | 0 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class snake_case_ :
    name : str
    pip_package : str = None
    @staticmethod
    def is_available ( ) -> Tuple:
        raise NotImplementedError
    def run ( self , trainer , n_trials: int , direction: str , **kwargs ) -> List[Any]:
        raise NotImplementedError
    def default_hp_space ( self , trial ) -> Tuple:
        raise NotImplementedError
    def ensure_available ( self ) -> Tuple:
        if not self.is_available():
            raise RuntimeError(
                F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
    @classmethod
    def pip_install ( cls ) -> List[str]:
        return F'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend ( snake_case_ ):
    name = """optuna"""
    @staticmethod
    def is_available ( ) -> List[str]:
        return is_optuna_available()
    def run ( self , trainer , n_trials: int , direction: str , **kwargs ) -> List[str]:
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs )
    def default_hp_space ( self , trial ) -> List[str]:
        return default_hp_space_optuna(trial )
class RayTuneBackend ( snake_case_ ):
    name = """ray"""
    pip_package = """'ray[tune]'"""
    @staticmethod
    def is_available ( ) -> Any:
        return is_ray_available()
    def run ( self , trainer , n_trials: int , direction: str , **kwargs ) -> Tuple:
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs )
    def default_hp_space ( self , trial ) -> List[Any]:
        return default_hp_space_ray(trial )
class SigOptBackend ( snake_case_ ):
    name = """sigopt"""
    @staticmethod
    def is_available ( ) -> int:
        return is_sigopt_available()
    def run ( self , trainer , n_trials: int , direction: str , **kwargs ) -> Dict:
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs )
    def default_hp_space ( self , trial ) -> int:
        return default_hp_space_sigopt(trial )
class WandbBackend ( snake_case_ ):
    name = """wandb"""
    @staticmethod
    def is_available ( ) -> Tuple:
        return is_wandb_available()
    def run ( self , trainer , n_trials: int , direction: str , **kwargs ) -> Any:
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs )
    def default_hp_space ( self , trial ) -> Tuple:
        return default_hp_space_wandb(trial )
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowercase_ ( ):
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'''{len(available_backends)} hyperparameter search backends available. Using {name} as the default.''')
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
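# Illustrative behavior (an editorial note, not part of the original file): with
# only `optuna` installed, lowercase_() above returns "optuna"; with none of the
# four backends installed it raises the RuntimeError listing the install commands.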
| 368 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_ ( PretrainedConfig ):
    model_type = "vit_mae"
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=5_12 , decoder_num_hidden_layers=8 , decoder_intermediate_size=20_48 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ) -> Optional[Any]:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 333 | 0 |
from __future__ import annotations
class snake_case_ :
    def __init__( self , data : int ) -> None:
        self.data = data
        self.left : Node | None = None
        self.right : Node | None = None
def display ( tree : Node | None): # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree ( tree : Node | None):
    return 1 + max(depth_of_tree(tree.left) , depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree ( tree : Node | None):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
def main ( ): # Main function for testing.
    # build a small sample tree of nine nodes
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)
    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
| 369 | def gcd ( a : int , b : int):
    while a != 0:
        a , b = b % a, a
    return b
def lowercase_ ( a : int , m : int):
    if gcd(a , m) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg)
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
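# Quick sanity check (an editorial addition, not part of the original file):
if __name__ == "__main__":
    assert gcd(3 , 7) == 1
    assert lowercase_(3 , 11) == 4  # since 3 * 4 = 12 ≡ 1 (mod 11)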
| 333 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
    '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
    '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv2_fast'''] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv2'''] = ['''LayoutLMv2FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv2'''] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv2'''] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 370 | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text ( text : str , n=100 , character=" "):
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0 , len(text) , n)]
def split_documents ( documents : dict):
    titles , texts = [], []
    for title, text in zip(documents["title"] , documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed ( documents : dict , ctx_encoder : DPRContextEncoder , ctx_tokenizer : DPRContextEncoderTokenizerFast):
    input_ids = ctx_tokenizer(
        documents["title"] , documents["text"] , truncation=True , padding="longest" , return_tensors="pt")["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device) , return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ : str = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase)
lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase__ : List[Any] = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space
lowercase__ : List[Any] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
lowercase__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
dataset.save_to_disk(_lowerCamelCase)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase)
# And save the index
lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(_lowerCamelCase)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments :
    csv_path : str = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
    question : Optional[str] = field(
        default=None ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
    rag_model_name : str = field(
        default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
    dpr_ctx_encoder_model_name : str = field(
        default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        } ,)
    output_dir : Optional[str] = field(
        default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class ProcessingArguments :
    num_proc : Optional[int] = field(
        default=None ,metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        } ,)
    batch_size : int = field(
        default=16 ,metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        } ,)
@dataclass
class IndexHnswArguments :
    d : int = field(
        default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
    m : int = field(
        default=128 ,metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        } ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 333 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class snake_case_ ( PretrainedConfig ):
    model_type = '''xmod'''
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ) -> Dict:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class snake_case_ ( OnnxConfig ):
@property
def __UpperCamelCase ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase__ : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 371 | import argparse
import datetime
def zeller ( date_input : str):
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m : int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_a : str = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d : int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y) , int(m) , int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c : int = int(str(y)[:2])
    k : int = int(str(y)[2:])
    t : int = int(2.6 * m - 5.39)
    u : int = int(c / 4)
    v : int = int(k / 4)
    x : int = int(d + k)
    z : int = int(t + u + v + x)
    w : int = int(z - (2 * c))
    f : int = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response : str = f'''Your date {date_input}, is a {days[str(f)]}!'''
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
zeller(args.date_input)
| 333 | 0 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10
def lowercase_ ( list_of_ints : list[int]):
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets : list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
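    # Illustrative check (an editorial addition, not in the original file):
    assert lowercase_([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]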
| 350 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    pass
def gen ( shards : List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main ( ):
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming" , type=bool)
    parser.add_argument("--local_rank" , type=int)
    parser.add_argument("--num_workers" , type=int , default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
if __name__ == "__main__":
main()
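# Illustrative launch (an editorial note; the exact command is an assumption, not
# part of the original file): RANK and WORLD_SIZE are read from the environment,
# as set by e.g. `torchrun --nproc_per_node=2 <this_script>.py --streaming True`.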
| 333 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available ( ):
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS" , "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS" , "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled" , False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class snake_case_ ( TrainingArguments ):
    mp_parameters : str = field(
        default="" ,metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} ,)
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , lowercase_ , )
@cached_property
def __UpperCamelCase ( self : Dict ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
lowercase__ : List[Any] = torch.device("cpu" )
lowercase__ : Optional[Any] = 0
elif is_sagemaker_model_parallel_available():
lowercase__ : Tuple = smp.local_rank()
lowercase__ : Union[str, Any] = torch.device("cuda" , lowercase_ )
lowercase__ : Dict = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
lowercase__ : List[Any] = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
lowercase__ : Tuple = torch.device("cuda" , self.local_rank )
lowercase__ : Any = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
lowercase__ : Any = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
lowercase__ : Optional[int] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
lowercase__ : Dict = torch.device("cuda" , self.local_rank )
lowercase__ : Tuple = 1
if device.type == "cuda":
torch.cuda.set_device(lowercase_ )
return device
    @property
    def world_size ( self ) -> int:
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device ( self ) -> bool:
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation ( self ) -> bool:
        return False
| 351 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case_ ( PretrainedConfig ):
    model_type = "unispeech"
    def __init__( self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ) -> Any:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
@property
    def inputs_to_logits_ratio ( self ) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
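        # Example (an editorial note, not part of the original file): with the default
        # conv_stride (5, 2, 2, 2, 2, 2, 2) this property evaluates to 5 * 2**6 = 320,
        # i.e. one encoder frame per 320 raw audio samples (20 ms at 16 kHz).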
| 333 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class snake_case_ ( PretrainedConfig ):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=1_00 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=2_49 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=1_28 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.00_06 , max_position_embeddings=5_12 , initializer_range=0.02 , layer_norm_eps=1E-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_02_56 , eos_token_id=5_02_56 , **kwargs , ) -> str:
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 352 | def cocktail_shaker_sort ( unsorted : list):
    for i in range(len(unsorted) - 1 , 0 , -1):
        swapped = False
        for j in range(i , 0 , -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j] , unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j] , unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
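# Example (an editorial note, not part of the original file): a 130x94 PIL image
# becomes 128x64 (each side rounded down to a multiple of 32), scaled from
# [0, 255] into [-1.0, 1.0] as a (1, 3, H, W) float tensor.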
class snake_case_ ( DiffusionPipeline ):
    def __init__( self , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ) -> List[Any]:
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self : Tuple , lowercase_ : Union[torch.Tensor, PIL.Image.Image] = None , lowercase_ : Optional[int] = 1 , lowercase_ : Optional[int] = 1_00 , lowercase_ : Optional[float] = 0.0 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(lowercase_ , PIL.Image.Image ):
lowercase__ : Any = 1
elif isinstance(lowercase_ , torch.Tensor ):
lowercase__ : List[Any] = image.shape[0]
else:
raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowercase_ )}''' )
if isinstance(lowercase_ , PIL.Image.Image ):
lowercase__ : str = preprocess(lowercase_ )
lowercase__ : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowercase__ : Tuple = (batch_size, self.unet.config.in_channels // 2, height, width)
lowercase__ : Any = next(self.unet.parameters() ).dtype
lowercase__ : Optional[int] = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
lowercase__ : Tuple = image.to(device=self.device , dtype=lowercase_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowercase__ : Optional[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : List[str] = {}
if accepts_eta:
lowercase__ : int = eta
for t in self.progress_bar(lowercase_ ):
# concat latents and low resolution image in the channel dimension.
lowercase__ : Optional[int] = torch.cat([latents, image] , dim=1 )
lowercase__ : Optional[int] = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
# predict the noise residual
lowercase__ : Tuple = self.unet(lowercase_ , lowercase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : Optional[int] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
# decode the image latents with the VQVAE
lowercase__ : Union[str, Any] = self.vqvae.decode(lowercase_ ).sample
lowercase__ : str = torch.clamp(lowercase_ , -1.0 , 1.0 )
lowercase__ : Any = image / 2 + 0.5
lowercase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ : Optional[Any] = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
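# A hedged usage sketch, assuming the class above corresponds to diffusers'
# LDMSuperResolutionPipeline (the checkpoint name below is an assumed example):
#
#   from diffusers import LDMSuperResolutionPipeline
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(low_res_image, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")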
| 353 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
__A : int = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ : Any = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
lowercase__ : str = self._feature_file(lowercase_ )
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
"""Compute validation""" ""
lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : int = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : List[Any] = self(**lowercase_ )
lowercase__ , lowercase__ : Any = outputs[:2]
lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
lowercase__ : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowercase__ : Any = dict(enumerate(self.labels ) )
lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ : Any = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
lowercase__ : List[Any] = dict(results.items() )
lowercase__ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
# when stable
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
lowercase__ : Any = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
# updating to test_epoch_end instead of deprecated test_end
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ : Optional[int] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase = parser.parse_args()
UpperCamelCase = NERTransformer(args)
UpperCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
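# Example launch for the script above; --data_dir, --output_dir,
# --model_name_or_path and --do_predict come from add_generic_args in
# lightning_base (not shown here), so treat those exact flag names as assumed:
#
#   python run_ner.py --data_dir ./conll2003 --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./ner_out \
#       --task_type NER --max_seq_length 128 --gpus 1 --do_predict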
| 333 | 0 |
def is_automorphic_number(number : int) -> bool:
    if not isinstance(number , int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True

if __name__ == "__main__":
    import doctest
    doctest.testmod()
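# Spot checks for is_automorphic_number: 5**2 = 25, 76**2 = 5776 and
# 625**2 = 390625 all end in the original number, while 7**2 = 49 does not.
assert is_automorphic_number(5) and is_automorphic_number(76) and is_automorphic_number(625)
assert not is_automorphic_number(7)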
| 354 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''image_processing_mask2former'''] = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_mask2former'''] = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
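# The lazy-module pattern above keeps `import ...mask2former` cheap: the module
# object is swapped for a _LazyModule that only performs the heavy torch/vision
# imports when an attribute such as Mask2FormerModel is first accessed.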
| 333 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])

def xgboost(features: np.ndarray , target: np.ndarray , test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42)
    xgb.fit(features , target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions) , 1)
    return predictions

def main() -> None:
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1)
    predictions = xgboost(x_train , y_train , x_test)
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test , predictions)}''')
    print(f'''Mean Square Error : {mean_squared_error(y_test , predictions)}''')

if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
    main()
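# A self-contained sketch of the same fit/predict flow on synthetic data (no
# dataset download); it assumes only numpy and xgboost, both imported above.
def _synthetic_demo() -> None:
    rng = np.random.default_rng(42)
    features = rng.normal(size=(200, 3))
    target = features @ np.array([1.5, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)
    model = XGBRegressor(verbosity=0, random_state=42)
    model.fit(features, target)
    print(model.predict(features[:5]))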
| 355 | # Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))

def cost_function(h , y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

def log_likelihood(x , y , weights):
    scores = np.dot(x , weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))

def logistic_reg(alpha , x , y , max_iterations=7_0000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x , theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T , h - y) / y.size
        theta = theta - alpha * gradient # updating the weights
        z = np.dot(x , theta)
        h = sigmoid_function(z)
        j = cost_function(h , y)
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''') # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0000)
    print('''theta: ''', theta) # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)) # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='''black''')
    plt.legend()
    plt.show()
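# A compact sketch of the update performed by the training loop above:
# h = sigmoid(x @ theta), gradient = x.T @ (h - y) / y.size, then a plain
# gradient-descent step (numpy and sigmoid_function are defined above).
def logistic_step(theta, x, y, alpha=0.1):
    h = sigmoid_function(np.dot(x, theta))
    return theta - alpha * np.dot(x.T, h - y) / y.size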
| 333 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class snake_case_ ( __A ,__A ,unittest.TestCase ):
__A : List[Any] = IFPipeline
__A : Dict = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = PipelineTesterMixin.required_optional_params - {"latents"}
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
return self._get_dummy_components()
def __UpperCamelCase ( self : List[Any] , lowercase_ : Dict , lowercase_ : int=0 ) -> Union[str, Any]:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : int = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Dict ) -> Tuple:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __UpperCamelCase ( self : int ) -> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCamelCase ( self : str ) -> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCamelCase ( self : Any ) -> List[str]:
self._test_save_load_local()
def __UpperCamelCase ( self : List[Any] ) -> str:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
lowercase__ : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
lowercase__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowercase_ , tokenizer=lowercase_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
lowercase__ : Optional[int] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase__ : Tuple = None
lowercase__ : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowercase__ : Optional[Any] = IFImgaImgPipeline(**pipe_a.components )
lowercase__ : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase__ : Tuple = IFInpaintingPipeline(**pipe_a.components )
lowercase__ : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Any ) -> List[str]:
# pipeline 1
_start_torch_memory_measurement()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : List[Any] = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , num_inference_steps=2 , generator=lowercase_ , output_type="np" , )
lowercase__ : Any = output.images[0]
assert image.shape == (64, 64, 3)
lowercase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowercase__ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
# pipeline 2
_start_torch_memory_measurement()
lowercase__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Union[str, Any] = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type="np" , )
lowercase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Any , lowercase_ : Dict ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
lowercase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : str = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : str = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , num_inference_steps=2 , generator=lowercase_ , output_type="np" , )
lowercase__ : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
lowercase__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
# pipeline 2
_start_torch_memory_measurement()
lowercase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : List[str] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Tuple = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , original_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type="np" , )
lowercase__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowercase__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Any ) -> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
lowercase__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowercase_ )
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : List[Any] = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , mask_image=lowercase_ , num_inference_steps=2 , generator=lowercase_ , output_type="np" , )
lowercase__ : str = output.images[0]
assert image.shape == (64, 64, 3)
lowercase__ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
# pipeline 2
_start_torch_memory_measurement()
lowercase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Optional[int] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowercase_ )
lowercase__ : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(lowercase_ )
lowercase__ : Optional[Any] = pipe_a(
prompt_embeds=lowercase_ , negative_prompt_embeds=lowercase_ , image=lowercase_ , mask_image=lowercase_ , original_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowercase__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def _start_torch_memory_measurement() -> None:
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
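# Measurement pattern used throughout the tests above: reset the CUDA peak
# counters before each stage, run the stage, then assert on the recorded peak.
#
#   _start_torch_memory_measurement()
#   ...run one pipeline stage...
#   peak_bytes = torch.cuda.max_memory_allocated()
#   assert peak_bytes < 13 * 10**9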
| 356 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class snake_case_ ( __A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__A : ClassVar[Features] = Features({"text": Value("string" )} )
__A : ClassVar[Features] = Features({"labels": ClassLabel} )
__A : str = "text"
__A : str = "labels"
def __UpperCamelCase ( self : Dict , lowercase_ : Optional[Any] ) -> int:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
lowercase__ : Optional[int] = copy.deepcopy(self )
lowercase__ : Tuple = self.label_schema.copy()
lowercase__ : Union[str, Any] = features[self.label_column]
lowercase__ : int = label_schema
return task_template
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
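# A hedged usage sketch, assuming the template above matches datasets' built-in
# TextClassification task (the alignment method defined above corresponds to
# `align_with_features` there):
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   template = template.align_with_features(features)
#   template.label_schema["labels"].names  # ['neg', 'pos']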
| 333 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    pass
def gen(shards : List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}

def main() -> None:
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming" , type=bool)
    parser.add_argument("--local_rank" , type=int)
    parser.add_argument("--num_workers" , type=int , default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')

if __name__ == "__main__":
    main()
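# The script above expects one process per rank with RANK/WORLD_SIZE set, e.g.
# via torchrun (script name assumed):
#
#   torchrun --nproc_per_node=2 test_split_dataset_by_node.py --num_workers 2
#
# split_dataset_by_node gives each rank a disjoint slice, and the per-rank
# sizes sum to NUM_SHARDS * NUM_ITEMS_PER_SHARD across the whole world.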
| 357 | def temp_input_value(min_val : int = 10 , max_val : int = 1000 , option : bool = True) -> int:
    assert (
        isinstance(min_val , int)
        and isinstance(max_val , int)
        and isinstance(option , bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val

def get_avg(number_1 : int , number_2 : int) -> int:
    return int((number_1 + number_2) / 2)
def guess_the_number(lower : int , higher : int , to_guess : int) -> None:
    assert (
        isinstance(lower , int) and isinstance(higher , int) and isinstance(to_guess , int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number : int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f'''guess the number : {last_numbers[-1]}''')
    print(f'''details : {last_numbers!s}''')
def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower , higher , guess)

if __name__ == "__main__":
    main()
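# A non-interactive sketch of the same search loop: guess the midpoint, keep
# the half that must contain the target (requires lower < to_guess < higher),
# and stop once the guess matches, e.g. bisect_guess(1, 1000, 42)[-1] == 42.
def bisect_guess(lower: int, higher: int, to_guess: int) -> list[int]:
    guesses = []
    while True:
        mid = get_avg(lower, higher)
        guesses.append(mid)
        if mid < to_guess:
            lower = mid
        elif mid > to_guess:
            higher = mid
        else:
            return guesses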
| 333 | 0 |
from collections.abc import Sequence
def max_subarray_sum(arr : Sequence[float] , allow_empty_subarrays : bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num)
        max_sum = max(max_sum , curr_sum)
    return max_sum

if __name__ == "__main__":
    from doctest import testmod
    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 358 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
self.diffusers_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | 0 |
import sys
import turtle
def lowercase_ ( _lowerCamelCase : tuple[float, float] , _lowerCamelCase : tuple[float, float]):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowercase_ ( _lowerCamelCase : tuple[float, float] , _lowerCamelCase : tuple[float, float] , _lowerCamelCase : tuple[float, float] , _lowerCamelCase : int , ):
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
my_pen.goto(vertexa[0] , vertexa[1])
if depth == 0:
return
triangle(_lowerCamelCase , get_mid(_lowerCamelCase , _lowerCamelCase) , get_mid(_lowerCamelCase , _lowerCamelCase) , depth - 1)
triangle(_lowerCamelCase , get_mid(_lowerCamelCase , _lowerCamelCase) , get_mid(_lowerCamelCase , _lowerCamelCase) , depth - 1)
triangle(_lowerCamelCase , get_mid(_lowerCamelCase , _lowerCamelCase) , get_mid(_lowerCamelCase , _lowerCamelCase) , depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
UpperCamelCase = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
UpperCamelCase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 359 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True):
model.train()
lowercase__ : Tuple = model(_lowerCamelCase)
lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False):
set_seed(42)
lowercase__ : Dict = RegressionModel()
lowercase__ : int = deepcopy(_lowerCamelCase)
lowercase__ : str = RegressionDataset(length=80)
lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16)
model.to(accelerator.device)
if sched:
lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3)
lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3)
lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
# Make a copy of `model`
if sched:
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowercase_ ( _lowerCamelCase : Tuple):
# Test when on a single CPU or GPU that the context manager does nothing
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Any):
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False):
lowercase__ : int = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False):
lowercase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def lowercase_ ( ):
lowercase__ : List[str] = Accelerator()
lowercase__ : List[Any] = RegressionDataset(length=80)
lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ : int = RegressionDataset(length=96)
lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if iteration < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if batch_num < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main() -> None:
lowercase__ : str = Accelerator()
lowercase__ : Dict = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
test_noop_sync(_lowerCamelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
test_distributed_sync(_lowerCamelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase)
def _mp_fn(index : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
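# The canonical pattern these tests exercise, as a hedged sketch (compute_loss
# is a hypothetical helper): gradients only synchronize across processes on
# the step where the accumulation window closes.
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       with accelerator.accumulate(model):
#           loss = compute_loss(model, batch)
#           accelerator.backward(loss)
#           optimizer.step()
#           optimizer.zero_grad()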
| 333 | 0 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path : str , map_location : str = "cpu" , save_path : Union[str, None] = None) -> None:
    state_dict = torch.load(src_path , map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v , torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path)

if __name__ == "__main__":
    fire.Fire(convert)
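# Example invocation via python-fire (the file name is assumed; the flags map
# directly onto convert()'s parameters):
#
#   python fp16_convert.py --src_path pytorch_model.bin --map_location cpu
#
# With no --save_path, the halved state dict overwrites src_path in place.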
| 360 | import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase)
lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase)
lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
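    # Example invocation (a sketch; the script filename below is
    # illustrative, not taken from this file):
    #   python convert_t5x_checkpoint_to_flax.py \
    #     --t5x_checkpoint_path /path/to/t5x_checkpoint \
    #     --config_name google/long-t5-local-base \
    #     --flax_dump_folder_path ./flax_dump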
| 333 | 0 |
# Lint as: python3
import itertools
import os
import re
UpperCamelCase = re.compile(R'''([A-Z]+)([A-Z][a-z])''')
UpperCamelCase = re.compile(R'''([a-z\d])([A-Z])''')
UpperCamelCase = re.compile(R'''(?<!_)_(?!_)''')
UpperCamelCase = re.compile(R'''(_{2,})''')
UpperCamelCase = R'''^\w+(\.\w+)*$'''
UpperCamelCase = R'''<>:/\|?*'''
def lowercase_ ( _lowerCamelCase : List[str]):
lowercase__ : Optional[Any] = _uppercase_uppercase_re.sub(R"\1_\2" , _lowerCamelCase)
lowercase__ : Union[str, Any] = _lowercase_uppercase_re.sub(R"\1_\2" , _lowerCamelCase)
return name.lower()
def lowercase_ ( _lowerCamelCase : Dict):
lowercase__ : Tuple = _single_underscore_re.split(_lowerCamelCase)
lowercase__ : str = [_multiple_underscores_re.split(_lowerCamelCase) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(_lowerCamelCase) if n != "")
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
if os.path.basename(_lowerCamelCase) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''')
return camelcase_to_snakecase(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]):
if os.path.basename(_lowerCamelCase) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''')
if not re.match(_split_re , _lowerCamelCase):
raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''')
return f'''{filename_prefix_for_name(_lowerCamelCase)}-{split}'''
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : str=None):
lowercase__ : int = filename_prefix_for_split(_lowerCamelCase , _lowerCamelCase)
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
lowercase__ : Tuple = os.path.join(_lowerCamelCase , _lowerCamelCase)
return f'''{filepath}*'''
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : str=None , _lowerCamelCase : Dict=None):
lowercase__ : Any = filename_prefix_for_split(_lowerCamelCase , _lowerCamelCase)
lowercase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase)
if shard_lengths:
lowercase__ : int = len(_lowerCamelCase)
lowercase__ : Any = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(_lowerCamelCase)]
if filetype_suffix:
lowercase__ : Optional[Any] = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
lowercase__ : str = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
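# A quick sanity check (a sketch added for illustration, not part of the
# original module): the two substitution passes defined above split
# camelCase names at case boundaries before lowercasing.
if __name__ == "__main__":
    _name = _uppercase_uppercase_re.sub(R"\1_\2", "HTMLParser")  # -> "HTML_Parser"
    _name = _lowercase_uppercase_re.sub(R"\1_\2", _name)  # no a-Z boundary left
    print(_name.lower())  # html_parser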
| 361 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
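        # Derived defaults (a note added for clarity): with the default
        # hidden_size=4096 and no explicit values, attention_hidden_size
        # falls back to 4096 and intermediate_size to 4 * 4096 = 16384.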
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
| 333 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
class snake_case_ ( __A ,__A ):
__A : Optional[Any] = "maskformer-swin"
__A : Optional[int] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : int , lowercase_ : Any=2_24 , lowercase_ : Optional[Any]=4 , lowercase_ : Optional[int]=3 , lowercase_ : Dict=96 , lowercase_ : Optional[int]=[2, 2, 6, 2] , lowercase_ : int=[3, 6, 12, 24] , lowercase_ : Optional[Any]=7 , lowercase_ : Any=4.0 , lowercase_ : Tuple=True , lowercase_ : List[Any]=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Any=False , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[Any]=1E-5 , lowercase_ : str=None , lowercase_ : Dict=None , **lowercase_ : List[Any] , ) -> str:
super().__init__(**lowercase_ )
lowercase__ : List[Any] = image_size
lowercase__ : List[str] = patch_size
lowercase__ : Dict = num_channels
lowercase__ : List[Any] = embed_dim
lowercase__ : str = depths
lowercase__ : List[str] = len(lowercase_ )
lowercase__ : str = num_heads
lowercase__ : int = window_size
lowercase__ : Optional[Any] = mlp_ratio
lowercase__ : Union[str, Any] = qkv_bias
lowercase__ : Any = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : str = drop_path_rate
lowercase__ : Optional[Any] = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Dict = layer_norm_eps
lowercase__ : Tuple = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ : str = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
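        # e.g. with the default embed_dim=96 and depths=[2, 2, 6, 2]
        # (4 stages), this gives hidden_size = 96 * 2**3 = 768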
lowercase__ : Dict = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
lowercase__ : List[str] = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
| 362 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
edges.sort(key=lambda lowercase_ : lowercase_[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ : Tuple = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
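# A self-contained sketch (for illustration; names here are not taken from
# the obfuscated class above) of the Borůvka step it implements: repeatedly
# take the cheapest edge leaving each component and union its endpoints.
def _boruvka_mst_sketch(num_vertices, edges):
    # edges: iterable of (u, v, weight) triples with distinct weights
    parent = list(range(num_vertices))
    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x
    mst, components = [], num_vertices
    while components > 1:
        cheapest = [None] * num_vertices  # cheapest outgoing edge per root
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for root in (ru, rv):
                    if cheapest[root] is None or cheapest[root][2] > w:
                        cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge is not None and find(edge[0]) != find(edge[1]):
                parent[find(edge[0])] = find(edge[1])
                mst.append(edge)
                components -= 1
    return mst
# e.g. a weighted 4-cycle keeps its three lightest edges:
# _boruvka_mst_sketch(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])
# -> [(0, 1, 1), (1, 2, 2), (2, 3, 3)]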
| 333 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCamelCase = sys.version_info >= (3, 10)
def lowercase_ ( _lowerCamelCase : Tuple=None , _lowerCamelCase : int=None):
return field(default_factory=lambda: default , metadata=_lowerCamelCase)
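# e.g. ``list_field(default=[1, 2, 3])`` hands each dataclass instance its
# own fresh list; mutable defaults must go through a default_factory.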
@dataclass
class snake_case_ :
__A : int
__A : float
__A : str
__A : bool
@dataclass
class snake_case_ :
__A : int = 42
__A : str = field(default="toto" ,metadata={"help": "help message"} )
@dataclass
class snake_case_ :
__A : bool = False
__A : bool = True
__A : Optional[bool] = None
class snake_case_ ( __A ):
__A : str = "titi"
__A : List[str] = "toto"
class snake_case_ ( __A ):
__A : Optional[int] = "titi"
__A : Union[str, Any] = "toto"
__A : str = 42
@dataclass
class snake_case_ :
__A : BasicEnum = "toto"
def __UpperCamelCase ( self : int ) -> List[Any]:
lowercase__ : str = BasicEnum(self.foo )
@dataclass
class snake_case_ :
__A : MixedTypeEnum = "toto"
def __UpperCamelCase ( self : str ) -> str:
lowercase__ : int = MixedTypeEnum(self.foo )
@dataclass
class snake_case_ :
__A : Optional[int] = None
__A : Optional[float] = field(default=__A ,metadata={"help": "help message"} )
__A : Optional[str] = None
__A : Optional[List[str]] = list_field(default=[] )
__A : Optional[List[int]] = list_field(default=[] )
@dataclass
class snake_case_ :
__A : List[int] = list_field(default=[] )
__A : List[int] = list_field(default=[1, 2, 3] )
__A : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] )
__A : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case_ :
__A : List[int] = field()
__A : str = field()
__A : BasicEnum = field()
def __UpperCamelCase ( self : Any ) -> int:
lowercase__ : int = BasicEnum(self.required_enum )
@dataclass
class snake_case_ :
__A : int
__A : "BasicEnum" = field()
__A : "Optional[bool]" = None
__A : "str" = field(default="toto" ,metadata={"help": "help message"} )
__A : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case_ :
__A : bool = False
__A : bool = True
__A : bool | None = None
@dataclass
class snake_case_ :
__A : int | None = None
__A : float | None = field(default=__A ,metadata={"help": "help message"} )
__A : str | None = None
__A : list[str] | None = list_field(default=[] )
__A : list[int] | None = list_field(default=[] )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : int , lowercase_ : argparse.ArgumentParser , lowercase_ : argparse.ArgumentParser ) -> Tuple:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowercase__ : Dict = {k: v for k, v in vars(lowercase_ ).items() if k != "container"}
lowercase__ : List[str] = {k: v for k, v in vars(lowercase_ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , lowercase_ ) and yy.get("choices" , lowercase_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](lowercase_ ) , yy["type"](lowercase_ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
lowercase__ : Any = HfArgumentParser(lowercase_ )
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("--foo" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--bar" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--baz" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--flag" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="?" )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
(lowercase__ ) : str = parser.parse_args_into_dataclasses(lowercase_ , look_for_args_file=lowercase_ )
self.assertFalse(example.flag )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Optional[Any] = HfArgumentParser(lowercase_ )
lowercase__ : List[str] = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=lowercase_ )
expected.add_argument("--baz" , default="toto" , type=lowercase_ , help="help message" )
self.argparsersEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : List[str] ) -> Dict:
lowercase__ : Tuple = argparse.ArgumentParser()
expected.add_argument("--foo" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="?" )
expected.add_argument("--baz" , type=lowercase_ , default=lowercase_ , const=lowercase_ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=lowercase_ , dest="baz" )
expected.add_argument("--opt" , type=lowercase_ , default=lowercase_ )
lowercase__ : Union[str, Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase__ : str = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Tuple = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Union[str, Any] = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Dict = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
lowercase__ : Optional[int] = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , baz=lowercase_ , opt=lowercase_ ) )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
lowercase__ : List[Any] = HfArgumentParser(lowercase_ )
lowercase__ : Dict = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowercase__ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowercase__ : Any = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowercase__ : int = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowercase__ : Dict = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowercase__ : Dict = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCamelCase ( self : Tuple ) -> List[str]:
@dataclass
class snake_case_ :
__A : Literal["titi", "toto", 42] = "toto"
lowercase__ : Union[str, Any] = HfArgumentParser(lowercase_ )
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowercase__ : int = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowercase__ : Tuple = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
lowercase__ : int = HfArgumentParser(lowercase_ )
lowercase__ : Dict = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=lowercase_ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=lowercase_ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=lowercase_ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : Any = parser.parse_args([] )
self.assertEqual(
lowercase_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowercase__ : List[Any] = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(lowercase_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __UpperCamelCase ( self : List[Any] ) -> int:
lowercase__ : Any = argparse.ArgumentParser()
expected.add_argument("--foo" , default=lowercase_ , type=lowercase_ )
expected.add_argument("--bar" , default=lowercase_ , type=lowercase_ , help="help message" )
expected.add_argument("--baz" , default=lowercase_ , type=lowercase_ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=lowercase_ )
expected.add_argument("--des" , nargs="+" , default=[] , type=lowercase_ )
lowercase__ : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase_ )
for dataclass_type in dataclass_types:
lowercase__ : Any = HfArgumentParser(lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(lowercase_ , Namespace(foo=lowercase_ , bar=lowercase_ , baz=lowercase_ , ces=[] , des=[] ) )
lowercase__ : Dict = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(lowercase_ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
lowercase__ : List[str] = HfArgumentParser(lowercase_ )
lowercase__ : Any = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=lowercase_ , required=lowercase_ )
expected.add_argument("--required_str" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=lowercase_ , )
self.argparsersEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : Optional[int] = HfArgumentParser(lowercase_ )
lowercase__ : Dict = argparse.ArgumentParser()
expected.add_argument("--foo" , type=lowercase_ , required=lowercase_ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=lowercase_ , )
expected.add_argument("--opt" , type=lowercase_ , default=lowercase_ )
expected.add_argument("--baz" , default="toto" , type=lowercase_ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=lowercase_ )
self.argparsersEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Dict ) -> str:
lowercase__ : str = HfArgumentParser(lowercase_ )
lowercase__ : List[Any] = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowercase__ : Optional[Any] = parser.parse_dict(lowercase_ )[0]
lowercase__ : Dict = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : List[Any] = HfArgumentParser(lowercase_ )
lowercase__ : Any = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(lowercase_ , parser.parse_dict , lowercase_ , allow_extra_keys=lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
lowercase__ : Tuple = HfArgumentParser(lowercase_ )
lowercase__ : int = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Any = os.path.join(lowercase_ , "temp_json" )
os.mkdir(lowercase_ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(lowercase_ , lowercase_ )
lowercase__ : Any = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowercase__ : Optional[Any] = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : str ) -> int:
lowercase__ : Tuple = HfArgumentParser(lowercase_ )
lowercase__ : Dict = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Optional[int] = os.path.join(lowercase_ , "temp_yaml" )
os.mkdir(lowercase_ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(lowercase_ , lowercase_ )
lowercase__ : Any = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowercase__ : int = BasicExample(**lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : List[str] = HfArgumentParser(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 363 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
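        # e.g. "neck.refinenet4.out_conv" -> "neck.fusion_stage.layers.0.out_conv"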
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
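        # e.g. with hidden_size=1024 the fused qkv kernel has shape
        # (3072, 1024): rows [:1024] are Q, [1024:2048] K, [2048:] V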
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
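    # Example invocation (a sketch; the script filename below is illustrative):
    #   python convert_dpt_to_pytorch.py \
    #     --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
    #     --pytorch_dump_folder_path ./dpt-large \
    #     --model_name dpt-large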
| 333 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class snake_case_ :
def __init__( self : str , lowercase_ : List[str] , lowercase_ : Optional[int]=3 , lowercase_ : Optional[int]=32 , lowercase_ : Dict=3 , lowercase_ : int=10 , lowercase_ : int=[8, 16, 32, 64] , lowercase_ : Union[str, Any]=[1, 1, 2, 1] , lowercase_ : Optional[Any]=True , lowercase_ : List[str]=True , lowercase_ : str="relu" , lowercase_ : Optional[Any]=3 , lowercase_ : Any=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : Optional[Any]=1 , ) -> Optional[Any]:
lowercase__ : Any = parent
lowercase__ : Dict = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Union[str, Any] = num_channels
lowercase__ : str = embeddings_size
lowercase__ : Any = hidden_sizes
lowercase__ : str = depths
lowercase__ : List[Any] = is_training
lowercase__ : Optional[int] = use_labels
lowercase__ : Optional[Any] = hidden_act
lowercase__ : List[str] = num_labels
lowercase__ : Tuple = scope
lowercase__ : Union[str, Any] = len(lowercase_ )
lowercase__ : Any = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Optional[Any] = num_groups
def __UpperCamelCase ( self : Dict ) -> int:
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
if self.use_labels:
lowercase__ : str = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> List[str]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : str , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Dict ) -> Union[str, Any]:
lowercase__ : Dict = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : str = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[str] ) -> int:
lowercase__ : List[str] = self.num_labels
lowercase__ : Optional[Any] = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Dict = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[int] ) -> str:
lowercase__ : List[str] = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Union[str, Any] = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : Tuple = None
lowercase__ : List[str] = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase__ : Any = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
lowercase__ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase__ : Optional[Any] = config_and_inputs
lowercase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( __A ,__A ,unittest.TestCase ):
__A : str = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__A : Dict = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
__A : Union[str, Any] = False
__A : str = False
__A : Dict = False
__A : List[Any] = False
__A : Union[str, Any] = False
def __UpperCamelCase ( self : Dict ) -> Tuple:
lowercase__ : Tuple = BitModelTester(self )
lowercase__ : Union[str, Any] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
return
@unittest.skip(reason="Bit does not output attentions" )
def __UpperCamelCase ( self : str ) -> List[str]:
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
pass
def __UpperCamelCase ( self : Tuple ) -> Tuple:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(lowercase_ )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : List[str] = [*signature.parameters.keys()]
lowercase__ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def __UpperCamelCase ( self : Any ) -> Any:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def __UpperCamelCase ( self : Any ) -> List[str]:
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : int ):
lowercase__ : Any = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : str = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : int = layer_type
lowercase__ : int = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def __UpperCamelCase ( self : str ) -> Any:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[int] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowercase_ ( ) -> Any:
lowercase__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : Tuple ) -> Dict:
lowercase__ : Any = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : Tuple = prepare_img()
lowercase__ : int = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase__ : str = model(**lowercase_ )
# verify the logits
lowercase__ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase__ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class snake_case_ ( __A ,unittest.TestCase ):
__A : List[Any] = (BitBackbone,) if is_torch_available() else ()
__A : List[str] = BitConfig
__A : List[str] = False
def __UpperCamelCase ( self : int ) -> Tuple:
lowercase__ : Any = BitModelTester(self )
| 364 | def lowercase_ ( _lowerCamelCase : int = 1 , _lowerCamelCase : int = 1000):
lowercase__ : Union[str, Any] = 1
lowercase__ : int = 0
for divide_by_number in range(_lowerCamelCase , digit + 1):
lowercase__ : list[int] = []
lowercase__ : Dict = numerator
for _ in range(1 , digit + 1):
if now_divide in has_been_divided:
if longest_list_length < len(_lowerCamelCase):
lowercase__ : Union[str, Any] = len(_lowerCamelCase)
lowercase__ : Optional[int] = divide_by_number
else:
has_been_divided.append(_lowerCamelCase)
lowercase__ : Optional[Any] = now_divide * 10 % divide_by_number
return the_digit
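# A self-contained sketch (illustrative; the function above is shown with
# obfuscated identifiers) of the same search: for each divisor d, walk the
# long-division remainders of 1/d until one repeats, and keep the d whose
# decimal expansion has the longest recurring cycle.
def _longest_recurring_cycle_sketch(limit: int = 1000) -> int:
    best_d, best_len = 0, 0
    for d in range(2, limit):
        seen = {}
        remainder, position = 1 % d, 0
        while remainder and remainder not in seen:
            seen[remainder] = position
            remainder = remainder * 10 % d
            position += 1
        cycle = position - seen[remainder] if remainder else 0
        if cycle > best_len:
            best_d, best_len = d, cycle
    return best_d
# _longest_recurring_cycle_sketch(10) == 7, since 1/7 repeats with period 6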
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def lowercase_ ( ):
lowercase__ : List[Any] = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowercase__ : int = get_sagemaker_input()
else:
lowercase__ : List[str] = get_cluster_input()
return config
def lowercase_ ( _lowerCamelCase : str=None):
if subparsers is not None:
lowercase__ : int = subparsers.add_parser("config" , description=_lowerCamelCase)
else:
lowercase__ : Dict = argparse.ArgumentParser("Accelerate config command" , description=_lowerCamelCase)
parser.add_argument(
"--config_file" , default=_lowerCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=_lowerCamelCase)
return parser
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : str = get_user_input()
if args.config_file is not None:
lowercase__ : List[str] = args.config_file
else:
if not os.path.isdir(_lowerCamelCase):
os.makedirs(_lowerCamelCase)
lowercase__ : Tuple = default_yaml_config_file
if config_file.endswith(".json"):
config.to_json_file(_lowerCamelCase)
else:
config.to_yaml_file(_lowerCamelCase)
print(f'''accelerate configuration saved at {config_file}''')
def lowercase_ ( ):
lowercase__ : List[Any] = config_command_parser()
lowercase__ : List[Any] = parser.parse_args()
config_command(_lowerCamelCase)
if __name__ == "__main__":
main()
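    # Typical invocations (a sketch): ``accelerate config`` launches the
    # interactive prompts; ``accelerate config --config_file my_config.yaml``
    # writes the answers to an explicit path instead of the default location.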
| 365 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
__A : int = StableUnCLIPPipeline
__A : int = TEXT_TO_IMAGE_PARAMS
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : int = False
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : str = 32
lowercase__ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : Any = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
lowercase__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
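    # Note on the wiring above: Stable unCLIP is a two-stage pipeline. The prior maps the
    # text prompt to a CLIP image embedding, the image normalizer plus noising scheduler
    # perturb that embedding, and the regular tokenizer/text_encoder/unet/scheduler/vae
    # stack then denoises an image conditioned on it.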
def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# Stable unCLIP will OOM when integration tests are run on a V100,
# so turn on memory savings.
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : Dict = pipe("anime turtle" , generator=lowercase_ , output_type="np" )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 0 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : Dict):
# Initialise PyTorch model
lowercase__ : Dict = MobileBertConfig.from_json_file(_lowerCamelCase)
print(f'''Building PyTorch model from configuration: {config}''')
lowercase__ : Tuple = MobileBertForPreTraining(_lowerCamelCase)
# Load weights from tf checkpoint
lowercase__ : Any = load_tf_weights_in_mobilebert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''')
torch.save(model.state_dict() , _lowerCamelCase)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
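    # Example invocation (script name and paths are hypothetical; the flags are the ones
    # declared above):
    #   python convert_mobilebert_checkpoint.py \
    #       --tf_checkpoint_path ./mobilebert/model.ckpt \
    #       --mobilebert_config_file ./mobilebert/config.json \
    #       --pytorch_dump_path ./mobilebert_pytorch/pytorch_model.bin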
| 366 | import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False):
try:
lowercase__ : str = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''')
return _value
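# Usage sketch: with RUN_SLOW unset, the call below returns its `default`; exporting
# RUN_SLOW=yes (or 1/true, anything `strtobool` accepts) flips `_run_slow_tests` to
# True, which un-skips tests decorated with `slow`.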
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None):
if test_case is None:
return partial(_lowerCamelCase , version=_lowerCamelCase)
return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
UpperCamelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase)
class snake_case_ ( unittest.TestCase ):
__A : int = True
@classmethod
def __UpperCamelCase ( cls : str ) -> str:
lowercase__ : str = tempfile.mkdtemp()
@classmethod
def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCamelCase ( self : str ) -> Optional[int]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase_ )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str:
lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Tuple = AcceleratorState()
lowercase__ : Optional[int] = tensor[None].clone().to(state.device)
lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu()
lowercase__ : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i] , _lowerCamelCase):
return False
return True
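# Intent of the helper above: inside a distributed test it gathers `tensor` from every
# process and returns True only if all workers hold an identical copy.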
class snake_case_ :
def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]:
lowercase__ : int = returncode
lowercase__ : Dict = stdout
lowercase__ : List[Any] = stderr
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str):
while True:
lowercase__ : int = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False):
if echo:
print("\nRunning: " , " ".join(_lowerCamelCase))
lowercase__ : str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is
    # that no data will be seen until the process is done, and if it hangs there will be
    # no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : Tuple = []
lowercase__ : List[Any] = []
def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""):
lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
sink.append(_lowerCamelCase)
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))),
asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True):
lowercase__ : Optional[Any] = asyncio.get_event_loop()
lowercase__ : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))
lowercase__ : str = " ".join(_lowerCamelCase)
if result.returncode > 0:
lowercase__ : Dict = "\n".join(result.stderr)
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''')
return result
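# Usage sketch (the command is illustrative): calling the synchronous wrapper above
# with ["python", "-c", "print('ok')"] runs the child to completion, tees its stdout
# and stderr, and raises RuntimeError containing the captured stderr on a non-zero
# return code.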
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False):
try:
lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(_lowerCamelCase , "decode"):
lowercase__ : Optional[Any] = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
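# Usage sketch (command is illustrative): the helper above with return_stdout=True,
# e.g. on ["echo", "hello"], returns the decoded "hello\n"; a failing command raises
# SubprocessCallException carrying the child's combined stdout/stderr.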
| 333 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
| 367 | from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
| 333 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
__A : Optional[Any] = LongformerTokenizer
__A : Dict = True
__A : Any = LongformerTokenizerFast
__A : str = True
def __UpperCamelCase ( self : List[Any] ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase__ : Optional[Any] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase__ : List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : int = {"unk_token": "<unk>"}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
def __UpperCamelCase ( self : str , **lowercase_ : List[Any] ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : Tuple , **lowercase_ : List[str] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : Tuple = "lower newer"
lowercase__ : Union[str, Any] = "lower newer"
return input_text, output_text
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
lowercase__ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : str = tokenizer.tokenize(lowercase_ ) # , add_prefix_space=True)
self.assertListEqual(lowercase_ , lowercase_ )
lowercase__ : int = tokens + [tokenizer.unk_token]
lowercase__ : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
lowercase__ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowercase_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowercase_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCamelCase ( self : Any ) -> Dict:
lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
lowercase__ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowercase_ )
lowercase__ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowercase_ )
lowercase__ : str = tokenizer.encode(
"sequence builders" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : Any = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : int = tokenizer.build_inputs_with_special_tokens(lowercase_ )
lowercase__ : Any = tokenizer.build_inputs_with_special_tokens(lowercase_ , lowercase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : str = "Encode this sequence."
lowercase__ : List[str] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
lowercase__ : List[str] = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
lowercase__ : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
lowercase__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase_ , lowercase_ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
lowercase__ : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
lowercase__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
# Testing spaces after special tokens
lowercase__ : str = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ )} ) # mask token has a left space
lowercase__ : int = tokenizer.convert_tokens_to_ids(lowercase_ )
lowercase__ : Tuple = "Encode <mask> sequence"
lowercase__ : Optional[Any] = "Encode <mask>sequence"
lowercase__ : Optional[int] = tokenizer.encode(lowercase_ )
lowercase__ : Union[str, Any] = encoded.index(lowercase_ )
lowercase__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase_ , lowercase_ )
lowercase__ : List[Any] = tokenizer.encode(lowercase_ )
lowercase__ : str = encoded.index(lowercase_ )
lowercase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Any ) -> Any:
pass
def __UpperCamelCase ( self : Any ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : Tuple = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : Optional[Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase__ : List[str] = "A, <mask> AllenNLP sentence."
lowercase__ : Dict = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowercase__ : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowercase__ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowercase_ )
self.assertEqual(post_processor_state["add_prefix_space"] , lowercase_ )
self.assertEqual(post_processor_state["trim_offsets"] , lowercase_ )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : Union[str, Any] = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase__ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
lowercase__ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Tuple = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Optional[int] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ) + 1, len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : str = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : str = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase_ ), len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Any = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Optional[Any] = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ) + 1, 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : Any = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : str = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
lowercase__ : str = self.rust_tokenizer_class.from_pretrained(
lowercase_ , use_fast=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ )
lowercase__ : Dict = tokenizer_r(lowercase_ , return_offsets_mapping=lowercase_ , add_special_tokens=lowercase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase_ ), 1 + len(lowercase_ ) + 1 + len(lowercase_ )) , )
| 368 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_ ( __A ):
__A : List[str] = "vit_mae"
def __init__( self : List[Any] , lowercase_ : List[Any]=7_68 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Optional[Any]=30_72 , lowercase_ : str="gelu" , lowercase_ : Tuple=0.0 , lowercase_ : int=0.0 , lowercase_ : Dict=0.02 , lowercase_ : int=1E-12 , lowercase_ : Tuple=2_24 , lowercase_ : Any=16 , lowercase_ : Dict=3 , lowercase_ : List[Any]=True , lowercase_ : Dict=16 , lowercase_ : List[str]=5_12 , lowercase_ : Tuple=8 , lowercase_ : Any=20_48 , lowercase_ : int=0.75 , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ) -> Optional[Any]:
super().__init__(**lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : str = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : List[Any] = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[str] = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Any = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[Any] = image_size
lowercase__ : Optional[int] = patch_size
lowercase__ : Any = num_channels
lowercase__ : str = qkv_bias
lowercase__ : Optional[Any] = decoder_num_attention_heads
lowercase__ : Any = decoder_hidden_size
lowercase__ : Any = decoder_num_hidden_layers
lowercase__ : Union[str, Any] = decoder_intermediate_size
lowercase__ : int = mask_ratio
lowercase__ : Tuple = norm_pix_loss
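    # Usage sketch (in Transformers this config class is ViTMAEConfig; the kwarg shown
    # overrides the default above): config = ViTMAEConfig(mask_ratio=0.6)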
| 333 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
lowercase__ : Union[str, Any] = "ZinengTang/tvlt-base"
lowercase__ : List[Any] = tempfile.mkdtemp()
def __UpperCamelCase ( self : int , **lowercase_ : Dict ) -> str:
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowercase_ )
def __UpperCamelCase ( self : Tuple , **lowercase_ : List[str] ) -> Optional[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowercase_ )
def __UpperCamelCase ( self : Dict ) -> Any:
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Optional[Any] ) -> str:
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Union[str, Any] = self.get_feature_extractor()
lowercase__ : Union[str, Any] = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase__ : List[Any] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowercase_ )
self.assertIsInstance(processor.image_processor , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
lowercase__ : Any = self.get_image_processor()
lowercase__ : Any = self.get_feature_extractor()
lowercase__ : int = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ )
lowercase__ : Dict = np.ones([1_20_00] )
lowercase__ : Tuple = feature_extractor(lowercase_ , return_tensors="np" )
lowercase__ : List[Any] = processor(audio=lowercase_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
lowercase__ : int = self.get_image_processor()
lowercase__ : str = self.get_feature_extractor()
lowercase__ : Tuple = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ )
lowercase__ : List[Any] = np.ones([3, 2_24, 2_24] )
lowercase__ : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
lowercase__ : Any = processor(images=lowercase_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : List[Any] = self.get_feature_extractor()
lowercase__ : int = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ )
lowercase__ : str = np.ones([1_20_00] )
lowercase__ : Dict = np.ones([3, 2_24, 2_24] )
lowercase__ : str = processor(audio=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> int:
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : int = self.get_feature_extractor()
lowercase__ : Optional[Any] = TvltProcessor(image_processor=lowercase_ , feature_extractor=lowercase_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 369 | def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
while a != 0:
lowercase__ , lowercase__ : Dict = b % a, a
return b
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
if gcd(_lowerCamelCase , _lowerCamelCase) != 1:
lowercase__ : Tuple = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowerCamelCase)
lowercase__ , lowercase__ , lowercase__ : Optional[int] = 1, 0, a
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = 0, 1, m
    while v3 != 0:
        lowercase__ : Tuple = u3 // v3
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Any = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
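# The two triples above are the extended Euclidean algorithm's (u1, u2, u3) and
# (v1, v2, v3). Worked example: the inverse of 3 modulo 7 is 5, since 3 * 5 = 15 ≡ 1
# (mod 7); on Python 3.8+ the builtin pow(3, -1, 7) returns the same value.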
| 333 | 0 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase = 256
class snake_case_ ( __A ):
__A : str = ["melgan"]
def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
lowercase__ : Dict = math.log(1E-5 ) # Matches MelGAN training.
lowercase__ : Dict = 4.0 # Largest value for most examples
lowercase__ : str = 1_28
self.register_modules(
notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )
def __UpperCamelCase ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]=(-1.0, 1.0) , lowercase_ : Optional[int]=False ) -> Any:
lowercase__ : Dict = output_range
if clip:
lowercase__ : List[str] = torch.clip(lowercase_ , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase__ : int = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
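    # Worked example for the [-1.0, 1.0] output range used later in the pipeline: a
    # feature equal to self.min_value (log 1e-5) maps to -1.0 and one equal to
    # self.max_value (4.0) maps to +1.0; the method below inverts this mapping.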
def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : Union[str, Any]=(-1.0, 1.0) , lowercase_ : Optional[Any]=False ) -> Dict:
lowercase__ : Optional[Any] = input_range
lowercase__ : str = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
# Scale to [0, 1].
lowercase__ : List[str] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __UpperCamelCase ( self : str , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : str ) -> List[Any]:
lowercase__ : List[str] = input_tokens > 0
lowercase__ : Optional[Any] = self.notes_encoder(
encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )
lowercase__ : List[Any] = self.continuous_encoder(
encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __UpperCamelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : int ) -> int:
lowercase__ : Tuple = noise_time
if not torch.is_tensor(lowercase_ ):
lowercase__ : str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
lowercase__ : Union[str, Any] = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ : List[Any] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase__ : Dict = self.decoder(
encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
return logits
@torch.no_grad()
def __call__( self : Union[str, Any] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(lowercase_ )}.''' )
lowercase__ : List[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase__ : List[str] = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase__ : List[str] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
for i, encoder_input_tokens in enumerate(lowercase_ ):
if i == 0:
lowercase__ : str = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase__ : str = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase__ : Tuple = ones
lowercase__ : int = self.scale_features(
lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ )
lowercase__ : Union[str, Any] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase__ : Union[str, Any] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(lowercase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : int = self.decode(
encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase__ : Any = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
lowercase__ : str = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] )
lowercase__ : List[str] = mel[:1]
lowercase__ : List[Any] = mel.cpu().float().numpy()
lowercase__ : Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ )
logger.info("Generated segment" , lowercase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase__ : int = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowercase_ )
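    # Usage sketch (the note sequences are hypothetical): calling the pipeline with a
    # list of encoded note chunks, num_inference_steps=100 and output_type="numpy"
    # denoises each chunk in turn and returns the MelGAN-rendered audio in `.audios`.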
| 370 | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=100 , _lowerCamelCase : Tuple=" "):
lowercase__ : Union[str, Any] = text.split(_lowerCamelCase)
return [character.join(text[i : i + n]).strip() for i in range(0 , len(_lowerCamelCase) , _lowerCamelCase)]
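# Illustration of the splitter above: with a chunk size of 2, "a b c d" becomes
# ["a b", "c d"]; with the default of 100, each passage holds (up to) 100 words.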
def lowercase_ ( _lowerCamelCase : dict):
lowercase__ , lowercase__ : List[str] = [], []
for title, text in zip(documents["title"] , documents["text"]):
if text is not None:
for passage in split_text(_lowerCamelCase):
titles.append(title if title is not None else "")
texts.append(_lowerCamelCase)
return {"title": titles, "text": texts}
def lowercase_ ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast):
lowercase__ : Union[str, Any] = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt")["input_ids"]
lowercase__ : Any = ctx_encoder(input_ids.to(device=_lowerCamelCase) , return_dict=_lowerCamelCase).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ : str = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase)
lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase__ : List[Any] = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space
lowercase__ : List[Any] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
lowercase__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
dataset.save_to_disk(_lowerCamelCase)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase)
# And save the index
lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(_lowerCamelCase)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
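    # Retrieval sketch (the query embedding is hypothetical): once the index is attached,
    # scores, examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)
    # returns the k closest passages by inner product.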
@dataclass
class snake_case_ :
__A : str = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
__A : Optional[str] = field(
default=__A ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
__A : str = field(
default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
__A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} ,)
__A : Optional[str] = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class snake_case_ :
__A : Optional[int] = field(
default=__A ,metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} ,)
__A : int = field(
default=16 ,metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} ,)
@dataclass
class snake_case_ :
__A : int = field(
default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
__A : int = field(
default=128 ,metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 333 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
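    # Lazy-module pattern: the first attribute access on this module triggers the real
    # import, so importing the package stays cheap until e.g. GraphormerModel is used.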
| 371 | import argparse
import datetime
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
lowercase__ : Any = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
    if len(_lowerCamelCase) != 10:
raise ValueError("Must be 10 characters long")
# Get month
lowercase__ : int = int(date_input[0] + date_input[1])
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12")
lowercase__ : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get day
lowercase__ : int = int(date_input[3] + date_input[4])
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31")
# Get second separator
lowercase__ : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get year
lowercase__ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?")
# Get datetime obj for validation
lowercase__ : Union[str, Any] = datetime.date(int(_lowerCamelCase) , int(_lowerCamelCase) , int(_lowerCamelCase))
# Start math
if m <= 2:
lowercase__ : Optional[Any] = y - 1
lowercase__ : int = m + 12
# maths var
lowercase__ : int = int(str(_lowerCamelCase)[:2])
lowercase__ : int = int(str(_lowerCamelCase)[2:])
lowercase__ : int = int(2.6 * m - 5.39)
lowercase__ : int = int(c / 4)
lowercase__ : int = int(k / 4)
lowercase__ : int = int(d + k)
lowercase__ : int = int(t + u + v + x)
lowercase__ : int = int(z - (2 * c))
lowercase__ : int = round(w % 7)
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer.")
# Response
lowercase__ : str = f'''Your date {date_input}, is a {days[str(_lowerCamelCase)]}!'''
return response
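# Quick sanity check (mm-dd-yyyy, as validated above):
#   zeller("01-31-2010") -> "Your date 01-31-2010, is a Sunday!"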
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
    args = parser.parse_args()
zeller(args.date_input)
| 333 | 0 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 350 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    # Yield NUM_ITEMS_PER_SHARD examples for every shard name passed in
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    # every rank gets floor(full_size / world_size) items, and the first
    # (full_size % world_size) ranks get one extra item each
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
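# This script is intended to be launched once per process by a distributed launcher that
# sets RANK and WORLD_SIZE; an illustrative command (the script name is an assumption):
#   torchrun --nproc_per_node=2 test_split_dataset_by_node.py --streaming True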
if __name__ == "__main__":
main()
| 333 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
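# These integration tests are gated behind the @slow decorator; in the transformers test
# suite they are typically enabled via the RUN_SLOW environment variable, e.g. (the test
# path is illustrative):
#   RUN_SLOW=1 pytest tests/models/xlm_roberta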
| 351 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
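# Illustrative use of the property above: the overall downsampling factor of the feature
# encoder is the product of the conv strides, so with the default strides a one-second,
# 16 kHz waveform maps to 16000 / 320 = 50 output frames.
#   config = UniSpeechConfig()
#   assert config.inputs_to_logits_ratio == 320  # 5 * 2 * 2 * 2 * 2 * 2 * 2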
| 333 | 0 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"
    return name
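# Example of the mapping performed above (traced by hand through the branches):
#   rename_key("encoder.model.layers.0.attn.proj.weight")
#   -> "encoder.encoder.layers.0.attention.output.dense.weight"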
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 352 |
def cocktail_shaker_sort(unsorted: list) -> list:
    # Bidirectional bubble sort: a forward and a backward pass per iteration
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
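# Examples (the alternating passes shake small values left and large values right):
#   cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]
#   cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) -> [-4, 0, 1, 2, 5, 11]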
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    mode = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
    def prepare_data(self) -> None:
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )
    def validation_step(self, batch, batch_nb) -> dict:
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs: list) -> dict:
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs) -> dict:
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 333 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 354 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 333 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
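# Shape of the returned mapping, with illustrative values:
#   {"ValueError: some error": {"count": 2,
#                               "failed_tests": [("tests/...::test_a", "error line"), ...]}}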
def get_model(test):
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
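# Examples:
#   get_model("tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_config") -> "albert"
#   get_model("tests/utils/test_logging.py::LoggingTest::test_foo") -> None  # not under tests/models/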
def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
    counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
        fp.write(s1)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
        fp.write(s2)
| 355 | # Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))
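# e.g. sigmoid_function(0.0) == 0.5, and large |z| saturates toward 0 or 1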
def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")

    plt.legend()
    plt.show()
| 333 | 0 |
def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
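# Examples (equivalent to Python's built-in `|` on non-negative ints):
#   binary_or(25, 32) -> "0b111001"   # 25 | 32 == 57
#   binary_or(0, 1)   -> "0b1"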
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
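# Usage sketch (assumes a datasets.Features object whose "labels" column is a ClassLabel):
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(dataset.features)
#   # task.label_schema["labels"] now carries the dataset's actual class names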
| 333 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive")
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive")
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive")
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration")
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration")
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2)
/ physical_constants["electron volt"][0]
)
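# Example (the concentrations are illustrative; only their ratio to the intrinsic
# concentration matters):
#   builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
#   -> roughly 0.83 V at T = 300 K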
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 |
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    # Standalone helper (not used by the search below); the name is a descriptive
    # rename, since nothing else in this module references it.
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower must be less than higher")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
| 333 | 0 |
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 358 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
lowercase__ : str = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowercase_ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case_ ( __A ,unittest.TestCase ):
__A : Dict = UnCLIPImageVariationPipeline
__A : Optional[Any] = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
__A : str = IMAGE_VARIATION_BATCH_PARAMS
__A : Optional[Any] = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
__A : Optional[Any] = False
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
return 32
@property
def __UpperCamelCase ( self : Dict ) -> List[Any]:
return 32
@property
def __UpperCamelCase ( self : Any ) -> Optional[int]:
return self.time_input_dim
@property
def __UpperCamelCase ( self : str ) -> Any:
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : List[str] ) -> Dict:
return 1_00
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
lowercase__ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def __UpperCamelCase ( self : Any ) -> Any:
torch.manual_seed(0 )
lowercase__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(lowercase_ )
@property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase__ : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(lowercase_ )
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
torch.manual_seed(0 )
lowercase__ : Tuple = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
lowercase__ : Any = UnCLIPTextProjModel(**lowercase_ )
return model
@property
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
torch.manual_seed(0 )
lowercase__ : int = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
lowercase__ : List[Any] = UNetaDConditionModel(**lowercase_ )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase__ : Optional[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
        # seeded differently to get a different unet from `self.dummy_super_res_first`
torch.manual_seed(1 )
lowercase__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
lowercase__ : List[str] = self.dummy_decoder
lowercase__ : List[Any] = self.dummy_text_proj
lowercase__ : Optional[Any] = self.dummy_text_encoder
lowercase__ : Union[str, Any] = self.dummy_tokenizer
lowercase__ : Dict = self.dummy_super_res_first
lowercase__ : Tuple = self.dummy_super_res_last
lowercase__ : Dict = UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=10_00 , )
lowercase__ : Any = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=10_00 , )
lowercase__ : Any = CLIPImageProcessor(crop_size=32 , size=32 )
lowercase__ : Union[str, Any] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[int] , lowercase_ : Any=0 , lowercase_ : Any=True ) -> Dict:
lowercase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
if str(lowercase_ ).startswith("mps" ):
lowercase__ : int = torch.manual_seed(lowercase_ )
else:
lowercase__ : Optional[int] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
if pil_image:
lowercase__ : List[str] = input_image * 0.5 + 0.5
lowercase__ : Union[str, Any] = input_image.clamp(0 , 1 )
lowercase__ : List[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase__ : Union[str, Any] = DiffusionPipeline.numpy_to_pil(lowercase_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __UpperCamelCase ( self : int ) -> Tuple:
lowercase__ : Tuple = "cpu"
lowercase__ : Tuple = self.get_dummy_components()
lowercase__ : Optional[int] = self.pipeline_class(**lowercase_ )
lowercase__ : Optional[Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Optional[int] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
lowercase__ : Optional[int] = pipe(**lowercase_ )
lowercase__ : int = output.images
lowercase__ : Optional[Any] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
lowercase__ : List[Any] = pipe(
**lowercase_ , return_dict=lowercase_ , )[0]
lowercase__ : List[Any] = image[0, -3:, -3:, -1]
lowercase__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ : Dict = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Any ) -> Any:
lowercase__ : Tuple = "cpu"
lowercase__ : Union[str, Any] = self.get_dummy_components()
lowercase__ : Union[str, Any] = self.pipeline_class(**lowercase_ )
lowercase__ : Union[str, Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : int = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
lowercase__ : Optional[int] = pipe(**lowercase_ )
lowercase__ : Any = output.images
lowercase__ : Dict = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
lowercase__ : str = pipe(
**lowercase_ , return_dict=lowercase_ , )[0]
lowercase__ : str = image[0, -3:, -3:, -1]
lowercase__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ : Tuple = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
lowercase__ : Tuple = "cpu"
lowercase__ : Optional[int] = self.get_dummy_components()
lowercase__ : Dict = self.pipeline_class(**lowercase_ )
lowercase__ : List[Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Dict = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
lowercase__ : int = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
lowercase__ : Optional[int] = pipe(**lowercase_ )
lowercase__ : List[Any] = output.images
lowercase__ : Any = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
lowercase__ : List[Any] = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
lowercase__ : Union[str, Any] = pipe(
**lowercase_ , return_dict=lowercase_ , )[0]
lowercase__ : Union[str, Any] = image[0, -3:, -3:, -1]
lowercase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowercase__ : Optional[int] = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowercase__ : int = torch.device("cpu" )
class snake_case_ :
__A : Optional[int] = 1
lowercase__ : Optional[int] = self.get_dummy_components()
lowercase__ : List[Any] = self.pipeline_class(**lowercase_ )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Optional[Any] = torch.Generator(device=lowercase_ ).manual_seed(0 )
lowercase__ : Optional[Any] = pipe.decoder.dtype
lowercase__ : List[Any] = 1
lowercase__ : List[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowercase__ : List[Any] = pipe.prepare_latents(
lowercase_ , dtype=lowercase_ , device=lowercase_ , generator=lowercase_ , latents=lowercase_ , scheduler=DummyScheduler() )
lowercase__ : Any = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowercase__ : Optional[int] = pipe.prepare_latents(
lowercase_ , dtype=lowercase_ , device=lowercase_ , generator=lowercase_ , latents=lowercase_ , scheduler=DummyScheduler() )
lowercase__ : List[str] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
lowercase__ : List[Any] = pipe(
**lowercase_ , decoder_latents=lowercase_ , super_res_latents=lowercase_ ).images
lowercase__ : List[str] = self.get_dummy_inputs(lowercase_ , pil_image=lowercase_ )
# Don't pass image, instead pass embedding
lowercase__ : Tuple = pipeline_inputs.pop("image" )
lowercase__ : Tuple = pipe.image_encoder(lowercase_ ).image_embeds
lowercase__ : str = pipe(
**lowercase_ , decoder_latents=lowercase_ , super_res_latents=lowercase_ , image_embeddings=lowercase_ , ).images
        # make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def __UpperCamelCase ( self : int ) -> Optional[int]:
lowercase__ : List[str] = torch_device == "cpu"
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowercase__ : List[Any] = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , expected_max_diff=lowercase_ )
@skip_mps
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
lowercase__ : Optional[int] = torch_device == "cpu"
lowercase__ : List[str] = True
lowercase__ : Union[str, Any] = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , additional_params_copy_to_batched_inputs=lowercase_ , )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
lowercase__ : Dict = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowercase__ : List[Any] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=lowercase_ , additional_params_copy_to_batched_inputs=lowercase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=lowercase_ )
@skip_mps
def __UpperCamelCase ( self : int ) -> Optional[int]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
return super().test_save_load_local()
@skip_mps
def __UpperCamelCase ( self : Any ) -> Dict:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
lowercase__ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
lowercase__ : List[Any] = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
lowercase__ : List[str] = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
lowercase__ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : List[Any] = pipeline(
lowercase_ , generator=lowercase_ , output_type="np" , )
lowercase__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ , 15 )
| 359 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True):
model.train()
lowercase__ : Tuple = model(_lowerCamelCase)
lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False):
set_seed(42)
lowercase__ : Dict = RegressionModel()
lowercase__ : int = deepcopy(_lowerCamelCase)
lowercase__ : str = RegressionDataset(length=80)
lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16)
model.to(accelerator.device)
if sched:
lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3)
lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3)
lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
# Make a copy of `model`
if sched:
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowercase_ ( _lowerCamelCase : Tuple):
# Test when on a single CPU or GPU that the context manager does nothing
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Any):
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False):
lowercase__ : int = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
        # DDP model and model should only be in sync at the end of each accumulation cycle or on the final batch
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False):
lowercase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def lowercase_ ( ):
lowercase__ : List[str] = Accelerator()
lowercase__ : List[Any] = RegressionDataset(length=80)
lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ : int = RegressionDataset(length=96)
lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if iteration < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if batch_num < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowercase_ ( ):
lowercase__ : str = Accelerator()
lowercase__ : Dict = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
test_noop_sync(_lowerCamelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
test_distributed_sync(_lowerCamelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
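# Hedged note: `accelerator.accumulate(model)` skips gradient synchronisation on the
# intermediate micro-batches and only syncs every `gradient_accumulation_steps`-th
# step (or on the final batch), which is exactly the in-sync / out-of-sync pattern
# the asserts above check.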
| 333 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case_ ( __A ):
__A : Any = ["image_processor", "tokenizer"]
__A : List[Any] = "CLIPImageProcessor"
__A : List[Any] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Optional[Any] , lowercase_ : Optional[Any]=None , lowercase_ : Tuple=None , **lowercase_ : Tuple ) -> Any:
lowercase__ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
lowercase__ : Any = kwargs.pop("feature_extractor" )
lowercase__ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self : int , lowercase_ : List[str]=None , lowercase_ : Tuple=None , lowercase_ : Dict=None , **lowercase_ : Union[str, Any] ) -> Dict:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowercase__ : Optional[int] = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
lowercase__ : Optional[Any] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
lowercase__ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def __UpperCamelCase ( self : Tuple , *lowercase_ : Dict , **lowercase_ : Optional[Any] ) -> int:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def __UpperCamelCase ( self : Any , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Tuple:
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def __UpperCamelCase ( self : Any ) -> Optional[int]:
lowercase__ : Optional[int] = self.tokenizer.model_input_names
lowercase__ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
return self.image_processor
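# Hedged usage sketch for the processor above (upstream name: CLIPProcessor). The
# checkpoint id is an assumption, not taken from this file:
#
#     from transformers import CLIPProcessor
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"], images=image,
#                        return_tensors="pt", padding=True)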
| 360 | import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str):
lowercase__ : Optional[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
lowercase__ : List[str] = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase)
lowercase__ : List[str] = checkpoints.load_tax_checkpoint(_lowerCamelCase)
lowercase__ : Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
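# Hedged example invocation (script name and paths are placeholders; the flags
# match the argparse definitions above):
#
#     python convert_t5x_checkpoint_to_flax.py \
#         --t5x_checkpoint_path /path/to/t5x_checkpoint \
#         --config_name google/long-t5-local-base \
#         --flax_dump_folder_path ./flax_dump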
| 333 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
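# Note: with the _LazyModule pattern above, importing the package only registers the
# names listed in _import_structure; torch / vision are imported lazily on first
# attribute access, keeping top-level imports cheap.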
| 361 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
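# Hedged usage sketch (the class above is RwkvConfig upstream; the values shown are
# the defaults from the __init__ signature):
#
#     config = RwkvConfig(vocab_size=50277, context_length=1024, hidden_size=4096)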
| 333 | 0 |
UpperCamelCase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def lowercase_ ( _lowerCamelCase : float):
"""simple docstring"""
assert type(_lowerCamelCase) in (int, float) and decimal == int(_lowerCamelCase)
lowercase__ : Union[str, Any] = int(_lowerCamelCase)
lowercase__ : Optional[Any] = ""
lowercase__ : Any = False
if decimal < 0:
lowercase__ : str = True
decimal *= -1
while decimal > 0:
        lowercase__ , lowercase__ : Optional[int] = divmod(decimal , 16)
lowercase__ : Optional[Any] = values[remainder] + hexadecimal
lowercase__ : Tuple = "0x" + hexadecimal
if negative:
lowercase__ : int = "-" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
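# Hedged examples of the intended behaviour (identifiers in this dump are obfuscated,
# so these are illustrative rather than executable doctests; the assumed upstream
# name is decimal_to_hexadecimal):
#
#     decimal_to_hexadecimal(26)   ->  '0x1a'
#     decimal_to_hexadecimal(-256) ->  '-0x100'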
| 362 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
edges.sort(key=lambda lowercase_ : e[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowercase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowercase__ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowercase__ : Tuple = roota
return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
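# Hedged usage sketch for the Boruvka MST above (upstream names Graph and
# Graph.UnionFind; the obfuscated static method here is assumed to correspond to an
# upstream `boruvka_mst(graph)` - an assumption about the original identifiers):
#
#     g = Graph.build(vertices=[1, 2, 3], edges=[(1, 2, 1), (2, 3, 2), (1, 3, 3)])
#     mst = Graph.boruvka_mst(g)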
| 333 | 0 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
lowercase__ : int = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" )
model.to(lowercase_ )
from datasets import load_dataset
lowercase__ : str = load_dataset("nielsr/rvlcdip-demo" )
lowercase__ : int = dataset["train"][0]["image"].convert("RGB" )
lowercase__ : Tuple = image_processor(lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**lowercase_ )
lowercase__ : List[Any] = outputs.logits
lowercase__ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape , lowercase_ )
lowercase__ : Union[str, Any] = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=lowercase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 363 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
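# Illustrative sanity check for the renaming chain above (a sketch; the function
# is defined under the obfuscated name `lowercase_` at this point in the file,
# and the sample key is hypothetical rather than read from a real checkpoint):
#
#   lowercase_("pretrained.model.blocks.0.mlp.fc1.weight")
#   # -> "dpt.encoder.layer.0.intermediate.dense.weight"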
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
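# Minimal standalone sketch (illustrative only; `torch` is already imported at
# the top of this script) of the qkv split performed above: timm stores the
# attention input projection as one fused matrix of shape
# (3 * hidden_size, hidden_size), stacked as query, key, value.
def _demo_qkv_split(hidden_size: int = 4):  # hypothetical helper, not part of the conversion script
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = qkv[:hidden_size, :]                  # first third  -> Q
    key = qkv[hidden_size : 2 * hidden_size, :]   # middle third -> K
    value = qkv[-hidden_size:, :]                 # last third   -> V
    assert torch.equal(torch.cat([query, key, value]), qkv)
    return query, key, value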
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
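# Example invocation (hypothetical local paths; the script name is whatever this
# file is saved as):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large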
| 333 | 0 |
import requests
from bs4 import BeautifulSoup
def world_covidaa_stats(url : str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text , "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div" , {"class": "maincounter-number"})
    keys += soup.findAll("span" , {"class": "panel-title"})
    values += soup.findAll("div" , {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f"{key}\n{value}\n")
| 364 | def solution(numerator : int = 1 , digit : int = 1000):
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1):
        has_been_divided : list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
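# Quick worked example (this is Project Euler problem 26-style logic): with
# numerator 1 and digit 10, solution(1, 10) returns 7, because among
# 1/1 .. 1/10 the fraction 1/7 has the longest recurring decimal cycle
# (0.142857...).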
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 0 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
__A : str = None
__A : List[Any] = BloomTokenizerFast
__A : Optional[Any] = BloomTokenizerFast
__A : Optional[int] = True
__A : Any = False
__A : List[Any] = "tokenizer_file"
__A : str = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def __UpperCamelCase ( self : List[str] ) -> Dict:
super().setUp()
lowercase__ : Any = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : Optional[Any] , **lowercase_ : Tuple ) -> List[str]:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : Dict = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowercase__ : Optional[Any] = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
lowercase__ : Any = tokenizer.batch_encode_plus(lowercase_ )["input_ids"]
self.assertListEqual(lowercase_ , lowercase_ )
lowercase__ : Any = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Tuple=6 ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase__ : List[Any] = "This is a simple input"
lowercase__ : Optional[int] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : int = ("This is a simple input", "This is a pair")
lowercase__ : Union[str, Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(lowercase_ , max_length=lowercase_ )
tokenizer_r.encode_plus(lowercase_ , max_length=lowercase_ )
tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ )
tokenizer_r.encode(lowercase_ , max_length=lowercase_ )
tokenizer_r.batch_encode_plus(lowercase_ , max_length=lowercase_ )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
lowercase__ : Optional[Any] = None # Hotfixing padding = None
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
lowercase__ : int = self.get_rust_tokenizer()
lowercase__ : Union[str, Any] = load_dataset("xnli" , "all_languages" , split="test" , streaming=lowercase_ )
lowercase__ : Tuple = next(iter(lowercase_ ) )["premise"] # pick up one data
lowercase__ : List[str] = list(sample_data.values() )
lowercase__ : Union[str, Any] = list(map(tokenizer.encode , lowercase_ ) )
lowercase__ : Union[str, Any] = [tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ ) for x in output_tokens]
self.assertListEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
# The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not impose
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 365 | import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
__A : int = StableUnCLIPPipeline
__A : int = TEXT_TO_IMAGE_PARAMS
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : int = False
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : str = 32
lowercase__ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : Any = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
lowercase__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : Dict = pipe("anime turle" , generator=lowercase_ , output_type="np" )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 0 |
from scipy.stats import pearsonr
import datasets
UpperCamelCase = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
UpperCamelCase = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
UpperCamelCase = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Tuple=False ) -> Optional[Any]:
if return_pvalue:
lowercase__ : List[Any] = pearsonr(lowercase_ , lowercase_ )
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase_ , lowercase_ )[0] )}
| 366 | import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key : str , default : bool = False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
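# Usage sketch (illustrative): parse_flag_from_env reads an environment variable
# and coerces common truthy/falsy strings via strtobool, which returns 1/0
# rather than a bool, falling back to `default` when the variable is unset:
#
#   os.environ["RUN_SLOW"] = "yes"
#   assert parse_flag_from_env("RUN_SLOW", default=False) == 1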
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None):
if test_case is None:
return partial(_lowerCamelCase , version=_lowerCamelCase)
return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase)
class snake_case_ ( unittest.TestCase ):
__A : int = True
@classmethod
def __UpperCamelCase ( cls : str ) -> str:
lowercase__ : str = tempfile.mkdtemp()
@classmethod
def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCamelCase ( self : str ) -> Optional[int]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase_ )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str:
lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i] , tensor):
            return False
    return True
class _RunOutput:
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False):
    if echo:
        print("\nRunning: " , " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label , line , file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line: tee(line , out , sys.stdout , label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr , lambda line: tee(line , err , sys.stderr , label="stderr:"))),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err)
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command , return_stdout=False):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output , "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'''Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}''') from e
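# Usage sketch for the `run_command` wrapper above (illustrative):
#
#   out = run_command(["echo", "hello"], return_stdout=True)
#   # -> "hello\n"; a non-zero exit status raises SubprocessCallException instead.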
| 333 | 0 |
import argparse
UpperCamelCase = '''docs/source/_static/js/custom.js'''
def update_custom_js(version : str):
    with open(UpperCamelCase , encoding="utf-8" , newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'''    "v{version}": "v{version}",\n'''
    with open(UpperCamelCase , "w" , encoding="utf-8" , newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
args = parser.parse_args()
update_custom_js(args.version)
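# Illustrative effect (hypothetical version string): running this script with
# --version 4.30.0 rewrites the stable-version line of custom.js,
#
#   const stableVersion = "v4.29.1"   ->   const stableVersion = "v4.30.0"
#
# and appends the entry '"v4.30.0": "v4.30.0",' (indented) to the last line
# before the closing brace of `const versionMapping = {`.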
| 367 | from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : int , **lowercase_ : List[str] ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> Any:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Any ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Dict , *lowercase_ : str , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Tuple ) -> List[str]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Any , **lowercase_ : Optional[int] ) -> List[str]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : Dict , *lowercase_ : Dict , **lowercase_ : Any ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Tuple:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[str] , *lowercase_ : str , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Optional[int]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : int , *lowercase_ : List[str] , **lowercase_ : List[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Dict:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> int:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[Any] = ["flax"]
def __init__( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : Tuple , **lowercase_ : int ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : List[Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Dict = ["flax"]
def __init__( self : Any , *lowercase_ : int , **lowercase_ : int ) -> Optional[int]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : List[Any] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : Union[str, Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : Union[str, Any] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : List[str] , **lowercase_ : str ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[Any] = ["flax"]
def __init__( self : List[Any] , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] , *lowercase_ : Any , **lowercase_ : int ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> List[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : Optional[int] = ["flax"]
def __init__( self : Any , *lowercase_ : str , **lowercase_ : Dict ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : str , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Tuple:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Dict:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : int = ["flax"]
def __init__( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ) -> Dict:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : int , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] , *lowercase_ : Dict , **lowercase_ : int ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
class snake_case_ ( metaclass=__A ):
__A : List[str] = ["flax"]
def __init__( self : Tuple , *lowercase_ : List[Any] , **lowercase_ : Tuple ) -> Tuple:
requires_backends(self , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : Any , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
@classmethod
def __UpperCamelCase ( cls : List[str] , *lowercase_ : Union[str, Any] , **lowercase_ : Dict ) -> List[Any]:
requires_backends(cls , ["flax"] )
| 333 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 368 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class snake_case_ ( __A ):
__A : List[str] = "vit_mae"
    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_24 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=5_12 , decoder_num_hidden_layers=8 , decoder_intermediate_size=20_48 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
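# Usage sketch (the class above mirrors the upstream `ViTMAEConfig`): the
# defaults reproduce the base ViT-MAE architecture, and any field can be
# overridden by keyword:
#
#   from transformers import ViTMAEConfig
#   config = ViTMAEConfig(image_size=192, mask_ratio=0.9)
#   assert config.mask_ratio == 0.9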
| 333 | 0 |
def encrypt(input_string : str , key : int) -> str:
    temp_grid : list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string : str , key : int) -> str:
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid : list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(row)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string : str) -> dict:
    results = {}
    for key_guess in range(1 , len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
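# Worked example for the three functions above: with key = 3 the plaintext
# "HELLOWORLD" zig-zags over three rails,
#
#   H . . . O . . . L .     row 0 -> "HOL"
#   . E . L . W . R . D     row 1 -> "ELWRD"
#   . . L . . . O . . .     row 2 -> "LO"
#
# so encrypt("HELLOWORLD", 3) == "HOLELWRDLO", decrypt("HOLELWRDLO", 3)
# recovers "HELLOWORLD", and bruteforce("HOLELWRDLO")[3] == "HELLOWORLD".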
| 369 | def gcd(a : int , b : int):
    while a != 0:
        a , b = b % a, a
    return b
def find_mod_inverse(a : int , m : int):
    if gcd(a , m) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg)
    ua , ub , uc = 1, 0, a
    va , vb , vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va , vb , vc , ua , ub , uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
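# Worked example: the inverse of 7 modulo 26 is 15, since 7 * 15 = 105 = 4 * 26 + 1;
# gcd(7, 26) == 1, so the inverse exists and find_mod_inverse(7, 26) returns 15.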
| 333 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case_ ( __A ):
__A : torch.FloatTensor
class snake_case_ ( nn.Module ):
def __init__( self : Any , lowercase_ : str=3 , lowercase_ : List[str]=3 , lowercase_ : List[Any]=("DownEncoderBlock2D",) , lowercase_ : Optional[int]=(64,) , lowercase_ : int=2 , lowercase_ : Optional[Any]=32 , lowercase_ : str="silu" , lowercase_ : Tuple=True , ) -> Optional[int]:
super().__init__()
lowercase__ : Tuple = layers_per_block
lowercase__ : Any = torch.nn.Convad(
lowercase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Union[str, Any] = None
lowercase__ : Any = nn.ModuleList([] )
# down
lowercase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(lowercase_ ):
lowercase__ : List[str] = output_channel
lowercase__ : List[Any] = block_out_channels[i]
lowercase__ : Optional[int] = i == len(lowercase_ ) - 1
lowercase__ : Optional[int] = get_down_block(
lowercase_ , num_layers=self.layers_per_block , in_channels=lowercase_ , out_channels=lowercase_ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowercase_ , resnet_groups=lowercase_ , attention_head_dim=lowercase_ , temb_channels=lowercase_ , )
self.down_blocks.append(lowercase_ )
# mid
lowercase__ : Optional[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowercase_ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase_ , temb_channels=lowercase_ , )
# out
lowercase__ : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowercase_ , eps=1E-6 )
lowercase__ : str = nn.SiLU()
lowercase__ : List[Any] = 2 * out_channels if double_z else out_channels
lowercase__ : Optional[int] = nn.Convad(block_out_channels[-1] , lowercase_ , 3 , padding=1 )
lowercase__ : Tuple = False
def __UpperCamelCase ( self : Any , lowercase_ : Optional[int] ) -> str:
lowercase__ : int = x
lowercase__ : Dict = self.conv_in(lowercase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase_ : Tuple ):
def custom_forward(*lowercase_ : List[str] ):
return module(*lowercase_ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowercase__ : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowercase_ ) , lowercase_ , use_reentrant=lowercase_ )
# middle
lowercase__ : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase_ , use_reentrant=lowercase_ )
else:
for down_block in self.down_blocks:
lowercase__ : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase_ ) , lowercase_ )
# middle
lowercase__ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowercase_ )
else:
# down
for down_block in self.down_blocks:
lowercase__ : Optional[Any] = down_block(lowercase_ )
# middle
lowercase__ : List[str] = self.mid_block(lowercase_ )
# post-process
lowercase__ : Any = self.conv_norm_out(lowercase_ )
lowercase__ : int = self.conv_act(lowercase_ )
lowercase__ : Tuple = self.conv_out(lowercase_ )
return sample
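# Note (illustrative, not in the original file): with double_z=True the encoder
# emits 2 * out_channels feature maps; DiagonalGaussianDistribution further
# below splits them into mean and logvar via torch.chunk(parameters, 2, dim=1).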
class snake_case_ ( nn.Module ):
def __init__( self : int , lowercase_ : Dict=3 , lowercase_ : Any=3 , lowercase_ : Tuple=("UpDecoderBlock2D",) , lowercase_ : List[str]=(64,) , lowercase_ : Optional[Any]=2 , lowercase_ : int=32 , lowercase_ : Optional[Any]="silu" , lowercase_ : Union[str, Any]="group" , ) -> Tuple:
super().__init__()
lowercase__ : Optional[Any] = layers_per_block
lowercase__ : List[Any] = nn.Convad(
lowercase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Dict = None
lowercase__ : str = nn.ModuleList([] )
lowercase__ : Dict = in_channels if norm_type == "spatial" else None
# mid
lowercase__ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowercase_ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase_ , temb_channels=lowercase_ , )
# up
lowercase__ : Any = list(reversed(lowercase_ ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowercase_ ):
lowercase__ : int = output_channel
lowercase__ : Tuple = reversed_block_out_channels[i]
lowercase__ : Union[str, Any] = i == len(lowercase_ ) - 1
lowercase__ : List[str] = get_up_block(
lowercase_ , num_layers=self.layers_per_block + 1 , in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowercase_ , resnet_groups=lowercase_ , attention_head_dim=lowercase_ , temb_channels=lowercase_ , resnet_time_scale_shift=lowercase_ , )
self.up_blocks.append(lowercase_ )
lowercase__ : Any = output_channel
# out
if norm_type == "spatial":
lowercase__ : List[str] = SpatialNorm(block_out_channels[0] , lowercase_ )
else:
lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowercase_ , eps=1E-6 )
lowercase__ : Any = nn.SiLU()
lowercase__ : Any = nn.Convad(block_out_channels[0] , lowercase_ , 3 , padding=1 )
lowercase__ : List[str] = False
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Union[str, Any]=None ) -> List[Any]:
lowercase__ : Optional[Any] = z
lowercase__ : Union[str, Any] = self.conv_in(lowercase_ )
lowercase__ : str = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase_ : List[str] ):
def custom_forward(*lowercase_ : Union[str, Any] ):
return module(*lowercase_ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase_ , lowercase_ , use_reentrant=lowercase_ )
lowercase__ : int = sample.to(lowercase_ )
# up
for up_block in self.up_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowercase_ ) , lowercase_ , lowercase_ , use_reentrant=lowercase_ )
else:
# middle
lowercase__ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase_ , lowercase_ )
lowercase__ : Any = sample.to(lowercase_ )
# up
for up_block in self.up_blocks:
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase_ ) , lowercase_ , lowercase_ )
else:
# middle
lowercase__ : str = self.mid_block(lowercase_ , lowercase_ )
lowercase__ : Dict = sample.to(lowercase_ )
# up
for up_block in self.up_blocks:
lowercase__ : Union[str, Any] = up_block(lowercase_ , lowercase_ )
# post-process
if latent_embeds is None:
lowercase__ : Any = self.conv_norm_out(lowercase_ )
else:
lowercase__ : List[Any] = self.conv_norm_out(lowercase_ , lowercase_ )
lowercase__ : Optional[Any] = self.conv_act(lowercase_ )
lowercase__ : Tuple = self.conv_out(lowercase_ )
return sample
class snake_case_ ( nn.Module ):
def __init__( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[str]=None , lowercase_ : str="random" , lowercase_ : Tuple=False , lowercase_ : Dict=True ) -> str:
super().__init__()
lowercase__ : int = n_e
lowercase__ : List[Any] = vq_embed_dim
lowercase__ : Optional[Any] = beta
lowercase__ : Dict = legacy
lowercase__ : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : List[Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Optional[Any] = self.used.shape[0]
lowercase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Optional[Any] = self.re_embed
lowercase__ : Optional[Any] = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
lowercase__ : List[str] = n_e
lowercase__ : Tuple = sane_index_shape
def __UpperCamelCase ( self : List[str] , lowercase_ : int ) -> List[str]:
lowercase__ : Tuple = inds.shape
assert len(lowercase_ ) > 1
lowercase__ : Optional[Any] = inds.reshape(ishape[0] , -1 )
lowercase__ : Optional[Any] = self.used.to(lowercase_ )
lowercase__ : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Optional[Any] = match.argmax(-1 )
lowercase__ : Optional[Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : Dict = self.unknown_index
return new.reshape(lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : int ) -> Union[str, Any]:
lowercase__ : Optional[Any] = inds.shape
assert len(lowercase_ ) > 1
lowercase__ : List[Any] = inds.reshape(ishape[0] , -1 )
lowercase__ : Optional[int] = self.used.to(lowercase_ )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : List[str] = 0 # simply set to zero
lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowercase_ )
return back.reshape(lowercase_ )
def __UpperCamelCase ( self : Any , lowercase_ : List[str] ) -> int:
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : Tuple = torch.argmin(torch.cdist(lowercase_ , self.embedding.weight ) , dim=1 )
lowercase__ : List[str] = self.embedding(lowercase_ ).view(z.shape )
lowercase__ : Optional[Any] = None
lowercase__ : Optional[int] = None
# compute loss for embedding
if not self.legacy:
lowercase__ : int = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : List[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : Dict = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Union[str, Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : Union[str, Any] = self.remap_to_used(lowercase_ )
lowercase__ : Any = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : str = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] ) -> List[str]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Tuple = self.unmap_to_all(lowercase_ )
lowercase__ : List[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : Optional[int] = self.embedding(lowercase_ )
if shape is not None:
lowercase__ : Any = z_q.view(lowercase_ )
# reshape back to match original input shape
lowercase__ : Any = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
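# Standalone sketch (illustrative, not part of the original module): the
# "preserve gradients" line in the quantizer's forward pass is the
# straight-through estimator -- the forward value equals the quantized result,
# while the backward pass treats quantization as the identity, so gradients
# reach the encoder output z despite the non-differentiable argmin.
_z = torch.randn(4, requires_grad=True)
_z_q = torch.round(_z)  # stand-in for the nearest-codebook-vector lookup
_z_st = _z + (_z_q - _z).detach()  # value == _z_q, gradient flows to _z
_z_st.sum().backward()
assert torch.allclose(_z.grad, torch.ones_like(_z))  # identity gradient w.r.t. _z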
class snake_case_ ( __A ):
def __init__( self : List[str] , lowercase_ : Tuple , lowercase_ : Union[str, Any]=False ) -> int:
lowercase__ : Any = parameters
lowercase__ : Tuple = torch.chunk(lowercase_ , 2 , dim=1 )
lowercase__ : str = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : str = deterministic
lowercase__ : Any = torch.exp(0.5 * self.logvar )
lowercase__ : List[Any] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : List[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=lowercase_ , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : Union[str, Any] = self.mean + self.std * sample
return x
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Union[str, Any]=None ) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __UpperCamelCase ( self : Dict , lowercase_ : int , lowercase_ : int=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
return self.mean
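# Sanity-check sketch (illustrative, not part of the original module): the
# closed-form KL expression above, in the other=None branch, matches
# KL(N(mean, std) || N(0, 1)) summed over the latent dimensions.
from torch.distributions import Normal, kl_divergence

_mean, _logvar = torch.zeros(2, 3) + 0.5, torch.zeros(2, 3) - 1.0
_var, _std = _logvar.exp(), (0.5 * _logvar).exp()
_closed_form = 0.5 * torch.sum(_mean.pow(2) + _var - 1.0 - _logvar, dim=1)
_reference = kl_divergence(Normal(_mean, _std), Normal(0.0, 1.0)).sum(dim=1)
assert torch.allclose(_closed_form, _reference)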
| 370 | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
UpperCamelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Tuple=100 , _lowerCamelCase : Tuple=" "):
lowercase__ : Union[str, Any] = text.split(_lowerCamelCase)
return [character.join(text[i : i + n]).strip() for i in range(0 , len(_lowerCamelCase) , _lowerCamelCase)]
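# Example (illustrative): splitting over 2-word windows,
#   split_text("one two three four five", 2) -> ["one two", "three four", "five"]
# The default n=100 yields roughly 100-word passages, the granularity DPR expects.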
def lowercase_ ( _lowerCamelCase : dict):
lowercase__ , lowercase__ : List[str] = [], []
for title, text in zip(documents["title"] , documents["text"]):
if text is not None:
for passage in split_text(_lowerCamelCase):
titles.append(title if title is not None else "")
texts.append(_lowerCamelCase)
return {"title": titles, "text": texts}
def lowercase_ ( _lowerCamelCase : dict , _lowerCamelCase : DPRContextEncoder , _lowerCamelCase : DPRContextEncoderTokenizerFast):
lowercase__ : Union[str, Any] = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=_lowerCamelCase , padding="longest" , return_tensors="pt")["input_ids"]
lowercase__ : Any = ctx_encoder(input_ids.to(device=_lowerCamelCase) , return_dict=_lowerCamelCase).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ : str = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase)
lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase__ : List[Any] = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space
lowercase__ : List[Any] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
lowercase__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
dataset.save_to_disk(_lowerCamelCase)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase)
# And save the index
lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(_lowerCamelCase)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
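    # Illustrative follow-up (not in the original script): once the FAISS index
    # is attached, approximate nearest neighbours can be fetched via the
    # `datasets` API, e.g.
    #   scores, examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)
    # where `question_embedding` would come from a DPR *question* encoder.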
@dataclass
class snake_case_ :
__A : str = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
__A : Optional[str] = field(
default=__A ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
__A : str = field(
default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
__A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} ,)
__A : Optional[str] = field(
default=str(Path(__A ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class snake_case_ :
__A : Optional[int] = field(
default=__A ,metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} ,)
__A : int = field(
default=16 ,metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} ,)
@dataclass
class snake_case_ :
__A : int = field(
default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
__A : int = field(
default=128 ,metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 333 | 0 |
"""simple docstring"""
def lowercase_ ( _lowerCamelCase : int = 10 , _lowerCamelCase : int = 1000 , _lowerCamelCase : bool = True):
assert (
isinstance(_lowerCamelCase , _lowerCamelCase)
and isinstance(_lowerCamelCase , _lowerCamelCase)
and isinstance(_lowerCamelCase , _lowerCamelCase)
), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid values: min_val must be less than max_val")
return min_val if option else max_val
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int):
return int((number_a + number_a) / 2)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int):
assert (
isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase) and isinstance(_lowerCamelCase , _lowerCamelCase)
    ), 'argument values must be of type "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)")
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value")
def answer(_lowerCamelCase : int) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started...")
lowercase__ : Optional[int] = lower
lowercase__ : List[Any] = higher
lowercase__ : Dict = []
while True:
lowercase__ : Any = get_avg(_lowerCamelCase , _lowerCamelCase)
last_numbers.append(_lowerCamelCase)
if answer(_lowerCamelCase) == "low":
lowercase__ : List[str] = number
elif answer(_lowerCamelCase) == "high":
lowercase__ : Optional[int] = number
else:
break
print(f'''guess the number : {last_numbers[-1]}''')
print(f'''details : {last_numbers!s}''')
def lowercase_ ( ):
lowercase__ : Tuple = int(input("Enter lower value : ").strip())
lowercase__ : Optional[int] = int(input("Enter high value : ").strip())
lowercase__ : Optional[Any] = int(input("Enter value to guess : ").strip())
guess_the_number(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
if __name__ == "__main__":
main()
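# Note (illustrative): get_avg halves the [lower, higher] interval on every
# pass, so guess_the_number converges in at most about log2(higher - lower)
# iterations.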
| 371 | import argparse
import datetime
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = {
"0": "Sunday",
"1": "Monday",
"2": "Tuesday",
"3": "Wednesday",
"4": "Thursday",
"5": "Friday",
"6": "Saturday",
}
lowercase__ : Any = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
    if len(_lowerCamelCase) != 10:
raise ValueError("Must be 10 characters long")
# Get month
lowercase__ : int = int(date_input[0] + date_input[1])
# Validate
if not 0 < m < 13:
raise ValueError("Month must be between 1 - 12")
lowercase__ : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get day
lowercase__ : int = int(date_input[3] + date_input[4])
# Validate
if not 0 < d < 32:
raise ValueError("Date must be between 1 - 31")
# Get second separator
lowercase__ : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("Date separator must be '-' or '/'")
# Get year
lowercase__ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"Year out of range. There has to be some sort of limit...right?")
# Get datetime obj for validation
lowercase__ : Union[str, Any] = datetime.date(int(_lowerCamelCase) , int(_lowerCamelCase) , int(_lowerCamelCase))
# Start math
if m <= 2:
lowercase__ : Optional[Any] = y - 1
lowercase__ : int = m + 12
# maths var
lowercase__ : int = int(str(_lowerCamelCase)[:2])
lowercase__ : int = int(str(_lowerCamelCase)[2:])
lowercase__ : int = int(2.6 * m - 5.39)
lowercase__ : int = int(c / 4)
lowercase__ : int = int(k / 4)
lowercase__ : int = int(d + k)
lowercase__ : int = int(t + u + v + x)
lowercase__ : int = int(z - (2 * c))
lowercase__ : int = round(w % 7)
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("The date was evaluated incorrectly. Contact developer.")
# Response
lowercase__ : str = f'''Your date {date_input}, is a {days[str(_lowerCamelCase)]}!'''
return response
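# Worked example (illustrative): for date_input = "01-01-2000" the month rolls
# back (m = 13, y = 1999), giving c = 19, k = 99, t = 28, u = 4, v = 24,
# x = 100, z = 156, w = 118 and f = 118 % 7 = 6 -> "Saturday", which
# 2000-01-01 indeed was.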
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
UpperCamelCase = parser.parse_args()
zeller(args.date_input)
| 333 | 0 |
"""simple docstring"""
def lowercase_ ( _lowerCamelCase : float , _lowerCamelCase : list[float]):
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative")
if not cash_flows:
raise ValueError("Cash flows list cannot be empty")
lowercase__ : str = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_lowerCamelCase))
return round(_lowerCamelCase , ndigits=2)
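# Worked example (illustrative): discount_rate=0.1, cash_flows=[-100, 60, 60]
# gives -100 / 1.1**0 + 60 / 1.1**1 + 60 / 1.1**2 ~= -100 + 54.55 + 49.59 = 4.13.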
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350 | import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
UpperCamelCase = 4
UpperCamelCase = 3
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str]):
for shard in shards:
for i in range(_lowerCamelCase):
yield {"i": i, "shard": shard}
def lowercase_ ( ):
lowercase__ : List[str] = int(os.environ["RANK"])
lowercase__ : Union[str, Any] = int(os.environ["WORLD_SIZE"])
lowercase__ : Union[str, Any] = ArgumentParser()
parser.add_argument("--streaming" , type=_lowerCamelCase)
parser.add_argument("--local_rank" , type=_lowerCamelCase)
parser.add_argument("--num_workers" , type=_lowerCamelCase , default=0)
lowercase__ : int = parser.parse_args()
lowercase__ : Union[str, Any] = args.streaming
lowercase__ : List[Any] = args.num_workers
lowercase__ : Dict = {"shards": [f'''shard_{shard_idx}''' for shard_idx in range(_lowerCamelCase)]}
lowercase__ : int = IterableDataset.from_generator(_lowerCamelCase , gen_kwargs=_lowerCamelCase)
if not streaming:
lowercase__ : str = Dataset.from_list(list(_lowerCamelCase))
lowercase__ : List[str] = split_dataset_by_node(_lowerCamelCase , rank=_lowerCamelCase , world_size=_lowerCamelCase)
lowercase__ : Any = torch.utils.data.DataLoader(_lowerCamelCase , num_workers=_lowerCamelCase)
lowercase__ : Dict = NUM_SHARDS * NUM_ITEMS_PER_SHARD
lowercase__ : Any = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
lowercase__ : List[str] = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''')
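# Illustrative launch command (assumed; any launcher that sets the RANK and
# WORLD_SIZE environment variables read in main() works):
#   torchrun --nproc_per_node=2 this_script.py --streaming True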
if __name__ == "__main__":
main()
| 333 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int):
lowercase__ : List[Any] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
lowercase__ : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw).convert("RGB")
lowercase__ : List[str] = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711)),
])
lowercase__ : Optional[int] = transform(_lowerCamelCase).unsqueeze(0).to(_lowerCamelCase)
return image
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
if "visual_encoder" in key:
lowercase__ : int = re.sub("visual_encoder*" , "vision_model.encoder" , _lowerCamelCase)
if "blocks" in key:
lowercase__ : Optional[Any] = re.sub(R"blocks" , "layers" , _lowerCamelCase)
if "attn" in key:
lowercase__ : Optional[Any] = re.sub(R"attn" , "self_attn" , _lowerCamelCase)
if "norm1" in key:
lowercase__ : List[Any] = re.sub(R"norm1" , "layer_norm1" , _lowerCamelCase)
if "norm2" in key:
lowercase__ : int = re.sub(R"norm2" , "layer_norm2" , _lowerCamelCase)
if "encoder.norm" in key:
lowercase__ : Union[str, Any] = re.sub(R"encoder.norm" , "post_layernorm" , _lowerCamelCase)
if "encoder.patch_embed.proj" in key:
lowercase__ : Union[str, Any] = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , _lowerCamelCase)
if "encoder.pos_embed" in key:
lowercase__ : Union[str, Any] = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , _lowerCamelCase)
if "encoder.cls_token" in key:
lowercase__ : Dict = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , _lowerCamelCase)
if "self_attn" in key:
lowercase__ : List[Any] = re.sub(R"self_attn.proj" , "self_attn.projection" , _lowerCamelCase)
return key
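# Example (illustrative):
#   rename_key("visual_encoder.blocks.0.attn.qkv.weight")
#   -> "vision_model.encoder.layers.0.self_attn.qkv.weight"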
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int]=None):
if config_path is not None:
lowercase__ : int = BlipConfig.from_pretrained(_lowerCamelCase)
else:
lowercase__ : Optional[int] = BlipConfig(projection_dim=512 , text_config={} , vision_config={})
lowercase__ : Any = BlipForConditionalGeneration(_lowerCamelCase).eval()
lowercase__ : Tuple = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
lowercase__ : Tuple = blip_decoder(pretrained=_lowerCamelCase , image_size=384 , vit="base")
lowercase__ : Tuple = pt_model.eval()
lowercase__ : Any = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ : Any = modified_state_dict.pop(_lowerCamelCase)
lowercase__ : List[str] = rename_key(_lowerCamelCase)
lowercase__ : str = value
hf_model.load_state_dict(_lowerCamelCase)
lowercase__ : Optional[int] = 384
lowercase__ : Optional[Any] = load_demo_image(image_size=_lowerCamelCase , device="cpu")
lowercase__ : Optional[int] = BertTokenizer.from_pretrained("bert-base-uncased")
lowercase__ : Union[str, Any] = tokenizer(["a picture of"]).input_ids
lowercase__ : List[str] = hf_model.generate(_lowerCamelCase , _lowerCamelCase)
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
lowercase__ : List[Any] = hf_model.generate(_lowerCamelCase)
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_lowerCamelCase)
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase__ : Tuple = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
lowercase__ : int = blip_vqa(pretrained=_lowerCamelCase , image_size=_lowerCamelCase , vit="base")
vqa_model.eval()
lowercase__ : Optional[Any] = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ : Dict = modified_state_dict.pop(_lowerCamelCase)
lowercase__ : Tuple = rename_key(_lowerCamelCase)
lowercase__ : int = value
lowercase__ : Any = BlipForQuestionAnswering(_lowerCamelCase)
hf_vqa_model.load_state_dict(_lowerCamelCase)
lowercase__ : List[Any] = ["How many dogs are in this image?"]
lowercase__ : int = tokenizer(_lowerCamelCase , return_tensors="pt").input_ids
lowercase__ : int = hf_vqa_model.generate(_lowerCamelCase , _lowerCamelCase)
print(tokenizer.decode(answer[0]))
assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
lowercase__ : Union[str, Any] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
lowercase__ : Optional[Any] = blip_itm(pretrained=_lowerCamelCase , image_size=_lowerCamelCase , vit="base")
itm_model.eval()
lowercase__ : Optional[int] = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase__ : Union[str, Any] = modified_state_dict.pop(_lowerCamelCase)
lowercase__ : Any = rename_key(_lowerCamelCase)
lowercase__ : int = value
lowercase__ : Optional[Any] = BlipForImageTextRetrieval(_lowerCamelCase)
lowercase__ : Tuple = ["A picture of a woman with a dog sitting in a beach"]
lowercase__ : str = tokenizer(
_lowerCamelCase , return_tensors="pt" , padding="max_length" , truncation=_lowerCamelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_lowerCamelCase)
hf_itm_model.eval()
lowercase__ : List[str] = hf_itm_model(_lowerCamelCase , _lowerCamelCase , use_itm_head=_lowerCamelCase)
lowercase__ : Optional[int] = hf_itm_model(_lowerCamelCase , _lowerCamelCase , use_itm_head=_lowerCamelCase)
assert out[0].item() == 0.2110687494277954
assert torch.nn.functional.softmax(out_itm[0] , dim=1)[:, 1].item() == 0.45698845386505127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
UpperCamelCase = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 351 | import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/unispeech-large-1500h-cv''': (
'''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case_ ( __A ):
__A : List[str] = "unispeech"
def __init__( self : List[Any] , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=7_68 , lowercase_ : List[str]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=30_72 , lowercase_ : List[Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : List[str]=0.0 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.02 , lowercase_ : int=1E-5 , lowercase_ : int="group" , lowercase_ : Tuple="gelu" , lowercase_ : Dict=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowercase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , lowercase_ : List[str]=(10, 3, 3, 3, 3, 2, 2) , lowercase_ : int=False , lowercase_ : List[Any]=1_28 , lowercase_ : Optional[Any]=16 , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=0.05 , lowercase_ : Optional[Any]=10 , lowercase_ : Any=2 , lowercase_ : int=0.0 , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=3_20 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=1_00 , lowercase_ : Dict=2_56 , lowercase_ : Optional[Any]=2_56 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[Any]="mean" , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=False , lowercase_ : Dict=2_56 , lowercase_ : Union[str, Any]=80 , lowercase_ : int=0 , lowercase_ : Union[str, Any]=1 , lowercase_ : Dict=2 , lowercase_ : Optional[int]=0.5 , **lowercase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
lowercase__ : List[str] = hidden_size
lowercase__ : Any = feat_extract_norm
lowercase__ : Optional[Any] = feat_extract_activation
lowercase__ : Dict = list(lowercase_ )
lowercase__ : Union[str, Any] = list(lowercase_ )
lowercase__ : List[str] = list(lowercase_ )
lowercase__ : List[str] = conv_bias
lowercase__ : Any = num_conv_pos_embeddings
lowercase__ : Dict = num_conv_pos_embedding_groups
lowercase__ : int = len(self.conv_dim )
lowercase__ : str = num_hidden_layers
lowercase__ : Any = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : int = num_attention_heads
lowercase__ : Union[str, Any] = hidden_dropout
lowercase__ : Any = attention_dropout
lowercase__ : Union[str, Any] = activation_dropout
lowercase__ : Any = feat_proj_dropout
lowercase__ : str = final_dropout
lowercase__ : int = layerdrop
lowercase__ : Optional[int] = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : Any = num_ctc_classes
lowercase__ : int = vocab_size
lowercase__ : str = do_stable_layer_norm
lowercase__ : Any = use_weighted_layer_sum
lowercase__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ : List[Any] = apply_spec_augment
lowercase__ : Dict = mask_time_prob
lowercase__ : Tuple = mask_time_length
lowercase__ : str = mask_time_min_masks
lowercase__ : List[Any] = mask_feature_prob
lowercase__ : int = mask_feature_length
lowercase__ : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__ : Optional[int] = num_codevectors_per_group
lowercase__ : List[str] = num_codevector_groups
lowercase__ : Dict = contrastive_logits_temperature
lowercase__ : Tuple = feat_quantizer_dropout
lowercase__ : Any = num_negatives
lowercase__ : Dict = codevector_dim
lowercase__ : Tuple = proj_codevector_dim
lowercase__ : List[str] = diversity_loss_weight
# ctc loss
lowercase__ : Tuple = ctc_loss_reduction
lowercase__ : Dict = ctc_zero_infinity
# pretraining loss
lowercase__ : Optional[Any] = replace_prob
@property
def __UpperCamelCase ( self : Dict ) -> Tuple:
return functools.reduce(operator.mul , self.conv_stride , 1 )
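    # Illustrative note: with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the
    # property above evaluates to 5 * 2**6 = 320, i.e. one encoder frame per
    # 320 input samples (20 ms of audio at 16 kHz).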
| 333 | 0 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def lowercase_ ( _lowerCamelCase : Optional[int]):
if hor == 128:
lowercase__ : Dict = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ : Union[str, Any] = (32, 128, 256)
lowercase__ : Dict = ("UpResnetBlock1D", "UpResnetBlock1D")
elif hor == 32:
lowercase__ : Tuple = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
lowercase__ : Union[str, Any] = (32, 64, 128, 256)
lowercase__ : Union[str, Any] = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
lowercase__ : Union[str, Any] = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''')
lowercase__ : Union[str, Any] = model.state_dict()
lowercase__ : List[Any] = {
"down_block_types": down_block_types,
"block_out_channels": block_out_channels,
"up_block_types": up_block_types,
"layers_per_block": 1,
"use_timestep_embedding": True,
"out_block_type": "OutConv1DBlock",
"norm_num_groups": 8,
"downsample_each_block": False,
"in_channels": 14,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"flip_sin_to_cos": False,
"freq_shift": 1,
"sample_size": 6_5536,
"mid_block_type": "MidResTemporalBlock1D",
"act_fn": "mish",
}
lowercase__ : Tuple = UNetaDModel(**_lowerCamelCase)
print(f'''length of state dict: {len(state_dict.keys())}''')
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
lowercase__ : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
lowercase__ : Dict = state_dict.pop(_lowerCamelCase)
hf_value_function.load_state_dict(_lowerCamelCase)
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''')
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( ):
lowercase__ : List[str] = {
"in_channels": 14,
"down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
"up_block_types": (),
"out_block_type": "ValueFunction",
"mid_block_type": "ValueFunctionMidBlock1D",
"block_out_channels": (32, 64, 128, 256),
"layers_per_block": 1,
"downsample_each_block": True,
"sample_size": 6_5536,
"out_channels": 14,
"extra_in_channels": 0,
"time_embedding_type": "positional",
"use_timestep_embedding": True,
"flip_sin_to_cos": False,
"freq_shift": 1,
"norm_num_groups": 8,
"act_fn": "mish",
}
lowercase__ : List[str] = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
lowercase__ : str = model
lowercase__ : Optional[int] = UNetaDModel(**_lowerCamelCase)
print(f'''length of state dict: {len(state_dict.keys())}''')
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys())}''')
lowercase__ : Optional[Any] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys()))
for k, v in mapping.items():
lowercase__ : Any = state_dict.pop(_lowerCamelCase)
hf_value_function.load_state_dict(_lowerCamelCase)
torch.save(hf_value_function.state_dict() , "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
with open("hub/hopper-medium-v2/value_function/config.json" , "w") as f:
json.dump(_lowerCamelCase , _lowerCamelCase)
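    # Illustrative reload sketch (assumed paths; not in the original script):
    #   config = json.load(open("hub/hopper-medium-v2/value_function/config.json"))
    #   model = UNetaDModel(**config)
    #   model.load_state_dict(torch.load("hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin"))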
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 352 | def lowercase_ ( _lowerCamelCase : list):
for i in range(len(_lowerCamelCase) - 1 , 0 , -1):
lowercase__ : int = False
        for j in range(i , 0 , -1):
if unsorted[j] < unsorted[j - 1]:
lowercase__ , lowercase__ : int = unsorted[j - 1], unsorted[j]
lowercase__ : List[str] = True
        for j in range(i):
if unsorted[j] > unsorted[j + 1]:
lowercase__ , lowercase__ : Optional[int] = unsorted[j + 1], unsorted[j]
lowercase__ : Dict = True
if not swapped:
break
return unsorted
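# Example (illustrative): [4, 5, 2, 1, 2] sorts to [1, 2, 2, 4, 5]; the
# `swapped` flag ends the outer loop early once a full forward/backward pass
# makes no exchange, giving O(n) behaviour on already-sorted input.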
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 0 |
from __future__ import annotations
import math
class snake_case_ :
def __init__( self : List[Any] , lowercase_ : int ) -> None:
lowercase__ : Tuple = size
# approximate the overall size of segment tree with given value
lowercase__ : Union[str, Any] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
lowercase__ : List[Any] = [0 for i in range(0 , 4 * size )]
lowercase__ : Tuple = [0 for i in range(0 , 4 * size )] # flag for lazy update
def __UpperCamelCase ( self : Dict , lowercase_ : int ) -> int:
return idx * 2
def __UpperCamelCase ( self : Any , lowercase_ : int ) -> int:
return idx * 2 + 1
def __UpperCamelCase ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : list[int] ) -> None:
if left_element == right_element:
lowercase__ : Optional[int] = a[left_element - 1]
else:
lowercase__ : Union[str, Any] = (left_element + right_element) // 2
self.build(self.left(lowercase_ ) , lowercase_ , lowercase_ , lowercase_ )
self.build(self.right(lowercase_ ) , mid + 1 , lowercase_ , lowercase_ )
lowercase__ : List[str] = max(
self.segment_tree[self.left(lowercase_ )] , self.segment_tree[self.right(lowercase_ )] )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int ) -> bool:
if self.flag[idx] is True:
lowercase__ : Dict = self.lazy[idx]
lowercase__ : List[Any] = False
if left_element != right_element:
lowercase__ : List[str] = self.lazy[idx]
lowercase__ : Optional[int] = self.lazy[idx]
lowercase__ : Optional[int] = True
lowercase__ : Optional[int] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
lowercase__ : Any = val
if left_element != right_element:
lowercase__ : int = val
lowercase__ : List[str] = val
lowercase__ : Optional[int] = True
lowercase__ : List[str] = True
return True
lowercase__ : Tuple = (left_element + right_element) // 2
self.update(self.left(lowercase_ ) , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
self.update(self.right(lowercase_ ) , mid + 1 , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Union[str, Any] = max(
self.segment_tree[self.left(lowercase_ )] , self.segment_tree[self.right(lowercase_ )] )
return True
def __UpperCamelCase ( self : List[str] , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int ) -> int | float:
if self.flag[idx] is True:
lowercase__ : str = self.lazy[idx]
lowercase__ : Any = False
if left_element != right_element:
lowercase__ : Dict = self.lazy[idx]
lowercase__ : Optional[int] = self.lazy[idx]
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[int] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
lowercase__ : Dict = (left_element + right_element) // 2
lowercase__ : List[str] = self.query(self.left(lowercase_ ) , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase__ : Dict = self.query(self.right(lowercase_ ) , mid + 1 , lowercase_ , lowercase_ , lowercase_ )
return max(lowercase_ , lowercase_ )
def __str__( self : List[Any] ) -> str:
return str([self.query(1 , 1 , self.size , lowercase_ , lowercase_ ) for i in range(1 , self.size + 1 )] )
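# Note (illustrative): `update` and `query` each visit O(log size) nodes; the
# `lazy`/`flag` arrays defer range assignments until a node is next touched,
# which is what keeps range updates logarithmic as well.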
if __name__ == "__main__":
UpperCamelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
UpperCamelCase = 15
UpperCamelCase = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 353 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
__A : int = "token-classification"
def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
if type(lowercase_ ) == dict:
lowercase__ : Dict = Namespace(**lowercase_ )
lowercase__ : str = import_module("tasks" )
try:
lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
lowercase__ : int = CrossEntropyLoss().ignore_index
super().__init__(lowercase_ , len(self.labels ) , self.mode )
def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
return self.model(**lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : Optional[int] = self(**lowercase_ )
lowercase__ : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowercase__ : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ : Any = self._feature_file(lowercase_ )
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , lowercase_ )
torch.save(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
lowercase__ : str = self._feature_file(lowercase_ )
logger.info("Loading features from cached file %s" , lowercase_ )
lowercase__ : str = torch.load(lowercase_ )
lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
"""Compute validation""" ""
lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
lowercase__ : int = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ : List[Any] = self(**lowercase_ )
lowercase__ , lowercase__ : Any = outputs[:2]
lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
lowercase__ : int = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowercase__ : Any = dict(enumerate(self.labels ) )
lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ : Any = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
"precision": precision_score(lowercase_ , lowercase_ ),
"recall": recall_score(lowercase_ , lowercase_ ),
"f1": fa_score(lowercase_ , lowercase_ ),
}
lowercase__ : List[Any] = dict(results.items() )
lowercase__ : List[str] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
# when stable
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
lowercase__ : Any = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
# updating to test_epoch_end instead of deprecated test_end
lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ : Optional[int] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
# Add NER specific options
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
parser.add_argument(
"--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase = parser.parse_args()
UpperCamelCase = NERTransformer(args)
UpperCamelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
UpperCamelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
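# Typical invocation (illustrative; the exact flags come from add_generic_args
# and add_model_specific_args above):
#   python this_script.py --data_dir ./data --labels ./labels.txt \
#       --model_name_or_path bert-base-cased --output_dir ./out \
#       --max_seq_length 128 --task_type NER --do_predict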
| 333 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
UpperCamelCase = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class snake_case_ ( unittest.TestCase ):
@classmethod
def __UpperCamelCase ( cls : Any ) -> Dict:
lowercase__ : int = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def __UpperCamelCase ( cls : Optional[Any] ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
lowercase__ : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
lowercase__ : str = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , repo_id="test-config" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase__ : Dict = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def __UpperCamelCase ( self : Tuple ) -> str:
lowercase__ : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
lowercase__ : Optional[Any] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id="valid_org/test-config-org" , push_to_hub=lowercase_ , use_auth_token=self._token )
lowercase__ : int = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def __UpperCamelCase ( self : Any ) -> str:
CustomConfig.register_for_auto_class()
lowercase__ : Optional[Any] = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
lowercase__ : List[Any] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=lowercase_ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
lowercase__ : int = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowercase__ : Dict = c.n_embd + 1 # int
lowercase__ : List[Any] = c.resid_pdrop + 1.0 # float
lowercase__ : Tuple = not c.scale_attn_weights # bool
lowercase__ : Optional[Any] = c.summary_type + "foo" # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowercase_ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(lowercase_ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(lowercase_ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(lowercase_ , c.summary_type , "mismatch for key: summary_type" )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[str] = PretrainedConfig()
lowercase__ : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowercase_ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
lowercase__ : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowercase_ , lowercase_ )]
if len(lowercase_ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F''' {", ".join(lowercase_ )}.''' )
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
with self.assertRaises(lowercase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowercase__ : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
lowercase__ : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
lowercase__ : int = mock.Mock()
lowercase__ : Tuple = 5_00
lowercase__ : str = {}
lowercase__ : Optional[int] = HTTPError
lowercase__ : List[Any] = {}
# Download this model to make sure it's in the cache.
lowercase__ : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowercase_ ) as mock_head:
lowercase__ : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self : Optional[int] ) -> Any:
# This test is for deprecated behavior and can be removed in v5
lowercase__ : Optional[Any] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def __UpperCamelCase ( self : int ) -> List[str]:
lowercase__ : Tuple = AutoConfig.from_pretrained("bert-base-cased" )
lowercase__ : str = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowercase_ )
lowercase__ : str = 2
json.dump(configuration.to_dict() , open(os.path.join(lowercase_ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowercase__ : Dict = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowercase__ : Dict = ["config.42.0.0.json"]
lowercase__ : Union[str, Any] = 7_68
configuration.save_pretrained(lowercase_ )
shutil.move(os.path.join(lowercase_ , "config.4.0.0.json" ) , os.path.join(lowercase_ , "config.42.0.0.json" ) )
lowercase__ : Optional[int] = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def __UpperCamelCase ( self : Any ) -> Tuple:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
lowercase__ : Any = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
lowercase__ : List[Any] = "v4.0.0"
lowercase__ : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowercase_ , return_unused_kwargs=lowercase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowercase_ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowercase__ : List[Any] = "v3.0.0"
lowercase__ : Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
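# A minimal, self-contained sketch (not the transformers implementation) of the
# "k1=v1,k2=v2" parsing exercised by the update_from_string test above; the
# coercion order tried here (bool, int, float, str) is an assumption for illustration.
def _parse_override_string(override: str) -> dict:
    parsed = {}
    for pair in override.split(","):
        key, _, raw = pair.partition("=")
        if raw in ("True", "False"):
            value = raw == "True"
        else:
            try:
                value = int(raw)
            except ValueError:
                try:
                    value = float(raw)
                except ValueError:
                    value = raw
        parsed[key.strip()] = value
    return parsed

assert _parse_override_string("n_embd=769,resid_pdrop=1.1,scale_attn_weights=False") == {
    "n_embd": 769,
    "resid_pdrop": 1.1,
    "scale_attn_weights": False,
}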
| 354 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_mask2former'''] = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mask2former'''] = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
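# A stripped-down sketch of the deferred-import behaviour that _LazyModule
# provides: attribute access triggers the submodule import via a module-level
# __getattr__ (PEP 562). The mapping below mirrors _import_structure above;
# it is illustrative only, not the real transformers implementation.
import importlib

_ATTR_TO_MODULE = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    if name in _ATTR_TO_MODULE:
        submodule = importlib.import_module("." + _ATTR_TO_MODULE[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")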
| 333 | 0 |
import functools
from typing import Any
def lowercase_ ( string : str , words : list[str]):
    # Validation
    if not isinstance(string , str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")
    if not isinstance(words , list) or not all(
        isinstance(item , str) and len(item) > 0 for item in words):
        raise ValueError("the words should be a list of non-empty strings")
    # Build trie
    trie : dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string)
    # Dynamic programming method
    @functools.cache
    def is_breakable(index : int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string):
            trie_node = trie_node.get(string[i] , None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False) and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
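    # Usage sketch for the word-break check above.
    assert lowercase_("applepenapple", ["apple", "pen"]) is True
    assert lowercase_("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False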
| 355 | # Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=7_0000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''')  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta))  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    x1_min, x1_max = (x[:, 0].min(), x[:, 0].max())
    x2_min, x2_max = (x[:, 1].min(), x[:, 1].max())
    xx1, xx2 = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
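    # A small evaluation sketch on top of the weights learned above: threshold
    # the predicted probabilities at 0.5 and report training accuracy.
    predictions = (predict_prob(x) >= 0.5).astype(int)
    print("training accuracy:", (predictions == y).mean())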
| 333 | 0 |
import os
def solution(filename : str = "input.txt"):
    with open(os.path.join(os.path.dirname(__file__) , filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2 , -1 , -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f"{solution() = }")
| 356 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class snake_case_ ( __A ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__A : str = field(default="text-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__A : ClassVar[Features] = Features({"text": Value("string" )} )
__A : ClassVar[Features] = Features({"labels": ClassLabel} )
__A : str = "text"
__A : str = "labels"
def __UpperCamelCase ( self : Dict , lowercase_ : Optional[Any] ) -> int:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
lowercase__ : Optional[int] = copy.deepcopy(self )
lowercase__ : Tuple = self.label_schema.copy()
lowercase__ : Union[str, Any] = features[self.label_column]
lowercase__ : int = label_schema
return task_template
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
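# A stripped-down sketch of the align step above: copy the frozen template and
# swap its label schema for the dataset's actual label feature. Plain dicts
# stand in for datasets.Features/ClassLabel purely for illustration.
from dataclasses import dataclass as _dataclass, replace as _dc_replace

@_dataclass(frozen=True)
class _TinyTemplate:
    label_schema: dict
    label_column: str = "labels"

    def align_with_features(self, features: dict) -> "_TinyTemplate":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        new_schema = dict(self.label_schema)
        new_schema[self.label_column] = features[self.label_column]
        return _dc_replace(self, label_schema=new_schema)

_aligned = _TinyTemplate({"labels": None}).align_with_features({"labels": ["neg", "pos"]})
assert _aligned.label_schema == {"labels": ["neg", "pos"]}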
| 333 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int=False):
try:
lowercase__ : str = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase__ : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowercase__ : Union[str, Any] = strtobool(_lowerCamelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''')
return _value
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skip("Test was skipped")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
return unittest.skipUnless(_run_slow_tests , "test is slow")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict):
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_xpu_available() , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(is_tpu_available() , "test requires TPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(is_torch_version(">=" , "1.12.0") , "test requires torch version >= 1.12.0")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None):
if test_case is None:
return partial(_lowerCamelCase , version=_lowerCamelCase)
return unittest.skipUnless(is_torch_version(">=" , _lowerCamelCase) , f'''test requires torch version >= {version}''')(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any]):
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int):
return unittest.skipUnless(is_wandb_available() , "test requires wandb")(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[str]):
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml")(_lowerCamelCase)
UpperCamelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase_ ( _lowerCamelCase : Any):
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_lowerCamelCase)
class snake_case_ ( unittest.TestCase ):
__A : int = True
@classmethod
def __UpperCamelCase ( cls : str ) -> str:
lowercase__ : str = tempfile.mkdtemp()
@classmethod
def __UpperCamelCase ( cls : List[str] ) -> Optional[Any]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCamelCase ( self : str ) -> Optional[int]:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowercase_ )
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] , lowercase_ : Union[mock.Mock, List[mock.Mock]] ) -> str:
lowercase__ : Tuple = mocks if isinstance(lowercase_ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : Tuple = AcceleratorState()
lowercase__ : Optional[int] = tensor[None].clone().to(state.device)
lowercase__ : Optional[int] = gather(_lowerCamelCase).cpu()
lowercase__ : Optional[Any] = tensor[0].cpu()
for i in range(tensors.shape[0]):
if not torch.equal(tensors[i] , _lowerCamelCase):
return False
return True
class snake_case_ :
def __init__( self : str , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int ) -> Union[str, Any]:
lowercase__ : int = returncode
lowercase__ : Dict = stdout
lowercase__ : List[Any] = stderr
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str):
while True:
lowercase__ : int = await stream.readline()
if line:
callback(_lowerCamelCase)
else:
break
async def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Tuple=False , _lowerCamelCase : str=False):
if echo:
print("\nRunning: " , " ".join(_lowerCamelCase))
lowercase__ : str = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase__ : Tuple = []
lowercase__ : List[Any] = []
def tee(_lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : int , _lowerCamelCase : Optional[int]=""):
lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
sink.append(_lowerCamelCase)
if not quiet:
print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:"))),
asyncio.create_task(_read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:"))),
] , timeout=_lowerCamelCase , )
return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=180 , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=True):
lowercase__ : Optional[Any] = asyncio.get_event_loop()
lowercase__ : List[Any] = loop.run_until_complete(
_stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))
lowercase__ : str = " ".join(_lowerCamelCase)
if result.returncode > 0:
lowercase__ : Dict = "\n".join(result.stderr)
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''')
return result
class snake_case_ ( __A ):
pass
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Any=False):
try:
lowercase__ : Optional[int] = subprocess.check_output(_lowerCamelCase , stderr=subprocess.STDOUT)
if return_stdout:
if hasattr(_lowerCamelCase , "decode"):
lowercase__ : Optional[Any] = output.decode("utf-8")
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(_lowerCamelCase)}` failed with the following error:\n\n{e.output.decode()}''') from e
| 357 | def lowercase_ ( min_val : int = 10 , max_val : int = 1000 , option : bool = True):
    assert (
        isinstance(min_val , int)
        and isinstance(max_val , int)
        and isinstance(option , bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_val must be <= max_val)")
return min_val if option else max_val
def get_avg(number_a : int , number_b : int):
    return int((number_a + number_b) / 2)
def guess_the_number(lower : int , higher : int , to_guess : int):
    assert (
        isinstance(lower , int) and isinstance(higher , int) and isinstance(to_guess , int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument values for lower and higher must satisfy lower < higher")
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of the lower and higher values")
    def answer(number : int) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
else:
break
print(f'''guess the number : {last_numbers[-1]}''')
print(f'''details : {last_numbers!s}''')
def main():
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower , higher , guess)
if __name__ == "__main__":
main()
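# Non-interactive sketch of the same bisection loop: count how many midpoint
# guesses are needed before landing on a target strictly inside the range.
def _count_guesses(lower: int, higher: int, target: int) -> int:
    guesses = 0
    while True:
        guesses += 1
        number = get_avg(lower, higher)
        if number == target:
            return guesses
        if number < target:
            lower = number
        else:
            higher = number

assert _count_guesses(0, 1000, 333) <= 10  # about log2(1000) halvings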
| 333 | 0 |
edges : dict[str, list[str]] = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
vertices : list[str] = ['''a''', '''b''', '''c''', '''d''', '''e''']
def topological_sort(start : str , visited : list , sort : list):
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort : list[str] = topological_sort('''a''', [], [])
print(sort)
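# An iterative alternative (Kahn's algorithm) to the recursive DFS above,
# included here only as a comparison sketch on the same graph.
from collections import deque

def kahn_topological_sort(graph: dict) -> list:
    indegree = {v: 0 for v in graph}
    for neighbors in graph.values():
        for n in neighbors:
            indegree[n] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for n in graph[v]:
            indegree[n] -= 1
            if indegree[n] == 0:
                queue.append(n)
    return order

print(kahn_topological_sort(edges))  # e.g. ['a', 'c', 'b', 'd', 'e']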
| 358 | import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ) -> List[str]:
        self.diffusers_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowercase__ : List[Any] = self.diffusers_dir
shutil.copy(
            os.path.join(git_repo_path , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def __UpperCamelCase ( self : Tuple , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple=None ) -> Tuple:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
lowercase__ : Optional[int] = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
lowercase__ : List[str] = black.format_str(lowercase_ , mode=lowercase_ )
lowercase__ : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowercase_ , "w" , newline="\n" ) as f:
f.write(lowercase_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase_ )
with open(lowercase_ , "r" ) as f:
self.assertTrue(f.read() , lowercase_ )
def __UpperCamelCase ( self : str ) -> Optional[int]:
lowercase__ : Optional[Any] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : int ) -> str:
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowercase_ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowercase_ ) , )
# Copy consistency with a really long name
lowercase__ : Optional[int] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' , F'''{long_class_name}SchedulerOutput''' , re.sub("Bert" , lowercase_ , lowercase_ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowercase_ , overwrite_result=re.sub("DDPM" , "Test" , lowercase_ ) , )
| 333 | 0 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''nielsr/canine-s''': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xe_0_0_0
SEP = 0xe_0_0_1
BOS = 0xe_0_0_2
MASK = 0xe_0_0_3
RESERVED = 0xe_0_0_4
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: '''[CLS]''',
SEP: '''[SEP]''',
BOS: '''[BOS]''',
MASK: '''[MASK]''',
PAD: '''[PAD]''',
RESERVED: '''[RESERVED]''',
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class snake_case_ ( __A ):
__A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=20_48 , **kwargs ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
# Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
def __UpperCamelCase ( self : Tuple ) -> int:
return self._unicode_vocab_size
def __UpperCamelCase ( self : str , lowercase_ : str ) -> List[str]:
return list(lowercase_ )
def __UpperCamelCase ( self : str , lowercase_ : str ) -> int:
try:
return ord(lowercase_ )
except TypeError:
raise ValueError(F'''invalid token: \'{token}\'''' )
def __UpperCamelCase ( self : int , lowercase_ : int ) -> str:
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(lowercase_ )
except TypeError:
raise ValueError(F'''invalid id: {index}''' )
def __UpperCamelCase ( self : Any , lowercase_ : List[str] ) -> List[str]:
return "".join(lowercase_ )
    def __UpperCamelCase ( self : Optional[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_a + sep
        if token_ids_b is not None:
            result += token_ids_b + sep
        return result
    def __UpperCamelCase ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_a )) + [1]
        if token_ids_b is not None:
            result += ([0] * len(token_ids_b )) + [1]
        return result
    def __UpperCamelCase ( self : Optional[int] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_a + sep ) * [0]
        if token_ids_b is not None:
            result += len(token_ids_b + sep ) * [1]
        return result
    def __UpperCamelCase ( self : Dict , save_directory : str , filename_prefix : Optional[str] = None ) -> List[str]:
return ()
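# A quick round-trip sketch of the character-level scheme above: tokens are the
# characters themselves and their IDs are the Unicode codepoints.
_text = "hello"
_ids = [ord(ch) for ch in _text]  # mirrors the token -> id conversion
assert _ids == [104, 101, 108, 108, 111]
assert "".join(chr(i) for i in _ids) == _text  # id -> token and back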
| 359 | from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
for param, grad_param in zip(model_a.parameters() , model_b.parameters()):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True):
model.train()
lowercase__ : Tuple = model(_lowerCamelCase)
lowercase__ : Union[str, Any] = F.mse_loss(_lowerCamelCase , target.to(output.device))
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : str=False):
set_seed(42)
lowercase__ : Dict = RegressionModel()
lowercase__ : int = deepcopy(_lowerCamelCase)
lowercase__ : str = RegressionDataset(length=80)
lowercase__ : List[Any] = DataLoader(_lowerCamelCase , batch_size=16)
model.to(accelerator.device)
if sched:
lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=1E-3)
lowercase__ : Union[str, Any] = AdamW(params=ddp_model.parameters() , lr=1E-3)
lowercase__ : Optional[int] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
lowercase__ : Union[str, Any] = LambdaLR(_lowerCamelCase , lr_lambda=lambda _lowerCamelCase: epoch**0.65)
# Make a copy of `model`
if sched:
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = accelerator.prepare(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
lowercase__ , lowercase__ : int = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowercase_ ( _lowerCamelCase : Tuple):
# Test when on a single CPU or GPU that the context manager does nothing
lowercase__ , lowercase__ , lowercase__ : List[Any] = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : int = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[int] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : int = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Any):
# Test on distributed setup that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase)
# Use a single batch
lowercase__ , lowercase__ : Dict = next(iter(_lowerCamelCase)).values()
for iteration in range(3):
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : List[str] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Any = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
else:
# Sync grads
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Tuple = ddp_input[torch.randperm(len(_lowerCamelCase))]
def lowercase_ ( _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Union[str, Any]=False):
lowercase__ : int = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ : Optional[int] = get_training_setup(_lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : str = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Optional[Any] = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : Union[str, Any] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters()):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_lowerCamelCase) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
lowercase__ : Dict = ddp_input[torch.randperm(len(_lowerCamelCase))]
GradientState._reset_state()
def lowercase_ ( _lowerCamelCase : List[str]=False , _lowerCamelCase : int=False):
lowercase__ : Dict = Accelerator(
split_batches=_lowerCamelCase , dispatch_batches=_lowerCamelCase , gradient_accumulation_steps=2)
# Test that context manager behaves properly
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = get_training_setup(_lowerCamelCase , _lowerCamelCase)
for iteration, batch in enumerate(_lowerCamelCase):
lowercase__ , lowercase__ : Any = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase__ , lowercase__ : Tuple = accelerator.gather((ddp_input, ddp_target))
lowercase__ , lowercase__ : List[str] = input.to(accelerator.device), target.to(accelerator.device)
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_lowerCamelCase)):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_lowerCamelCase):
step_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase__ : Tuple = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_lowerCamelCase))
if accelerator.num_processes > 1:
check_model_parameters(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration)
GradientState._reset_state()
def lowercase_ ( ):
lowercase__ : List[str] = Accelerator()
lowercase__ : List[Any] = RegressionDataset(length=80)
lowercase__ : Tuple = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ : int = RegressionDataset(length=96)
lowercase__ : List[str] = DataLoader(_lowerCamelCase , batch_size=16)
lowercase__ , lowercase__ : Dict = accelerator.prepare(_lowerCamelCase , _lowerCamelCase)
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if iteration < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_lowerCamelCase):
assert id(accelerator.gradient_state.active_dataloader) == id(_lowerCamelCase)
if batch_num < len(_lowerCamelCase) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowercase_ ( ):
lowercase__ : str = Accelerator()
lowercase__ : Dict = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**")
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**")
test_noop_sync(_lowerCamelCase)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**")
test_distributed_sync(_lowerCamelCase)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(_lowerCamelCase , _lowerCamelCase)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0") or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Any):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
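# The plain-PyTorch shape of the behaviour exercised above: scale each
# micro-batch loss by the accumulation factor and only step/zero the optimizer
# every `accum_steps` batches. `model`, `optimizer` and `batches` are assumed
# to be supplied by the caller; this is an illustrative sketch only.
def _manual_accumulation_sketch(model, optimizer, batches, accum_steps=2):
    for step, (inputs, targets) in enumerate(batches):
        loss = F.mse_loss(model(inputs), targets) / accum_steps
        loss.backward()  # gradients accumulate across micro-batches
        if (step + 1) % accum_steps == 0:
            optimizer.step()
            optimizer.zero_grad()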
| 333 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class snake_case_ ( __A ):
__A : Dict = "facebook/bart-large-mnli"
__A : Any = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__A : Tuple = "text_classifier"
__A : int = AutoTokenizer
__A : List[Any] = AutoModelForSequenceClassification
__A : List[str] = ["text", ["text"]]
__A : int = ["text"]
def __UpperCamelCase ( self : Dict ) -> Any:
super().setup()
lowercase__ : Any = self.model.config
lowercase__ : Tuple = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
lowercase__ : Any = int(lowercase_ )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] ) -> Dict:
lowercase__ : Any = labels
return self.pre_processor(
[text] * len(lowercase_ ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , )
def __UpperCamelCase ( self : int , lowercase_ : str ) -> Optional[int]:
lowercase__ : Dict = outputs.logits
lowercase__ : str = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
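# A standalone sketch of the decode step above: for an NLI head, each candidate
# label's score is its entailment logit and the argmax wins. The entailment
# column index (2 here) is illustrative; a real model reads it from its config.
_labels = ["positive", "negative", "neutral"]
_logits = torch.tensor([[0.1, 0.2, 2.5],  # one row per (text, label) pair
                        [0.3, 0.1, 0.4],
                        [0.2, 0.2, 0.9]])
assert _labels[torch.argmax(_logits[:, 2]).item()] == "positive"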
| 360 | import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax( tax_checkpoint_path : str , config_name : str , flax_dump_folder_path : str):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
lowercase__ : str = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
lowercase__ : Dict = f'''layers_{str(_lowerCamelCase)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
            lowercase__ : List[str] = tax_mlp_wi_0
            lowercase__ : List[Any] = tax_mlp_wi_1
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
        lowercase__ : Tuple = tax_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    lowercase__ : List[Any] = tax_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
    lowercase__ : Optional[Any] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(_lowerCamelCase)
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
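# Example invocation (a hypothetical sketch -- the script name and paths below
# are placeholders, while the flag names match the argparse setup above):
#
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoints/t5_small \
#       --config_name t5-small \
#       --flax_dump_folder_path /tmp/flax_t5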
| 333 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
UpperCamelCase = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
UpperCamelCase = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase_ ( ):
lowercase__ : int = (
list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
)
lowercase__ : str = bs[:]
lowercase__ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(_lowerCamelCase)
cs.append(2**8 + n)
n += 1
lowercase__ : int = [chr(_lowerCamelCase) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase))
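# A standalone sketch of the byte-to-unicode table built above: printable
# characters map to themselves, while the remaining bytes are shifted into the
# 256+ codepoint range (e.g. the space byte 0x20 becomes "Ġ", the familiar
# GPT-2 BPE marker). Re-implemented here with fresh names so it runs
# independently of the obfuscated identifiers in this file.
_demo_bs = (
    list(range(ord("!"), ord("~") + 1))
    + list(range(ord("¡"), ord("¬") + 1))
    + list(range(ord("®"), ord("ÿ") + 1))
)
_demo_cs, _demo_n = _demo_bs[:], 0
for _demo_b in range(2**8):
    if _demo_b not in _demo_bs:
        _demo_bs.append(_demo_b)
        _demo_cs.append(2**8 + _demo_n)
        _demo_n += 1
_demo_table = dict(zip(_demo_bs, [chr(_c) for _c in _demo_cs]))
assert _demo_table[ord("A")] == "A" and _demo_table[ord(" ")] == "Ġ"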
def lowercase_ ( _lowerCamelCase : Dict):
lowercase__ : Optional[Any] = set()
lowercase__ : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowercase__ : Tuple = char
return pairs
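# A quick self-contained check of the adjacent-pair extraction above: for the
# symbol tuple ("h", "e", "l", "l", "o") the expected bigram set is:
_demo_word = ("h", "e", "l", "l", "o")
_demo_pairs = {(_demo_word[i], _demo_word[i + 1]) for i in range(len(_demo_word) - 1)}
assert _demo_pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}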
class snake_case_ ( __A ):
__A : int = VOCAB_FILES_NAMES
__A : int = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : int = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : int="replace" , lowercase_ : Optional[int]="<s>" , lowercase_ : Optional[Any]="</s>" , lowercase_ : int="</s>" , lowercase_ : str="<s>" , lowercase_ : int="<unk>" , lowercase_ : int="<pad>" , lowercase_ : Union[str, Any]="<mask>" , lowercase_ : List[str]=False , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else bos_token
lowercase__ : Tuple = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else eos_token
lowercase__ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else sep_token
lowercase__ : List[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else cls_token
lowercase__ : Dict = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else unk_token
lowercase__ : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding="utf-8" ) as vocab_handle:
lowercase__ : List[str] = json.load(lowercase_ )
lowercase__ : Optional[int] = {v: k for k, v in self.encoder.items()}
lowercase__ : Optional[int] = errors # how to handle errors in decoding
lowercase__ : str = bytes_to_unicode()
lowercase__ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowercase_ , encoding="utf-8" ) as merges_handle:
lowercase__ : Any = merges_handle.read().split("\n" )[1:-1]
lowercase__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__ : List[str] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase__ : int = {}
lowercase__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ : Tuple = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self : Dict , lowercase_ : Dict ) -> List[str]:
if token in self.cache:
return self.cache[token]
lowercase__ : Union[str, Any] = tuple(lowercase_ )
lowercase__ : List[Any] = get_pairs(lowercase_ )
if not pairs:
return token
while True:
lowercase__ : Any = min(lowercase_ , key=lambda lowercase_ : self.bpe_ranks.get(lowercase_ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ : Union[str, Any] = bigram
lowercase__ : Optional[Any] = []
lowercase__ : str = 0
while i < len(lowercase_ ):
try:
lowercase__ : str = word.index(lowercase_ , lowercase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase__ : Any = j
if word[i] == first and i < len(lowercase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : List[Any] = tuple(lowercase_ )
lowercase__ : str = new_word
if len(lowercase_ ) == 1:
break
else:
lowercase__ : int = get_pairs(lowercase_ )
lowercase__ : List[str] = " ".join(lowercase_ )
lowercase__ : Tuple = word
return word
def __UpperCamelCase ( self : str , lowercase_ : Optional[int] ) -> List[Any]:
lowercase__ : Optional[Any] = []
for token in re.findall(self.pat , lowercase_ ):
lowercase__ : Optional[Any] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase_ ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> Union[str, Any]:
return self.decoder.get(lowercase_ )
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
lowercase__ : Dict = "".join(lowercase_ )
lowercase__ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ : Optional[Any] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowercase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" )
lowercase__ : Optional[Any] = 0
with open(lowercase_ , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase_ : lowercase_[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowercase__ : str = token_index
writer.write(" ".join(lowercase_ ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
lowercase__ : List[Any] = [self.sep_token_id]
lowercase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : str=False , **lowercase_ : Union[str, Any] ) -> Union[str, Any]:
lowercase__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowercase_ ) > 0 and not text[0].isspace()):
lowercase__ : Optional[Any] = " " + text
return (text, kwargs)
def __UpperCamelCase ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> str:
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : List[str] , lowercase_ : "Conversation" ) -> List[int]:
lowercase__ : Optional[int] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(lowercase_ )
lowercase__ : Optional[Any] = " ".join(lowercase_ )
lowercase__ : int = self.encode(lowercase_ )
if len(lowercase_ ) > self.model_max_length:
lowercase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
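# A minimal standalone sketch of the greedy lowest-rank-first merge loop used
# in the `bpe` method above (the merge ranks below are made up purely for
# illustration):
def _demo_bpe(word, ranks):
    symbols = list(word)
    while len(symbols) > 1:
        # Rank every adjacent pair; merge the best (lowest-rank) one.
        scored = [
            (ranks.get((a, b), float("inf")), i)
            for i, (a, b) in enumerate(zip(symbols, symbols[1:]))
        ]
        best_rank, i = min(scored)
        if best_rank == float("inf"):
            break
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols


assert _demo_bpe("hello", {("h", "e"): 0, ("l", "l"): 1, ("he", "l"): 2}) == ["he", "ll", "o"]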
| 361 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class snake_case_ ( __A ):
__A : Optional[int] = "rwkv"
__A : List[str] = {"max_position_embeddings": "context_length"}
def __init__( self : Dict , lowercase_ : List[Any]=5_02_77 , lowercase_ : Union[str, Any]=10_24 , lowercase_ : Any=40_96 , lowercase_ : int=32 , lowercase_ : Dict=None , lowercase_ : str=None , lowercase_ : Any=1E-5 , lowercase_ : Optional[Any]=0 , lowercase_ : Any=0 , lowercase_ : List[str]=6 , lowercase_ : List[Any]=False , lowercase_ : int=True , **lowercase_ : List[str] , ) -> int:
lowercase__ : List[str] = vocab_size
lowercase__ : str = context_length
lowercase__ : List[Any] = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Optional[Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowercase__ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowercase__ : List[Any] = layer_norm_epsilon
lowercase__ : str = rescale_every
lowercase__ : Optional[int] = use_cache
lowercase__ : int = bos_token_id
lowercase__ : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
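# A small standalone check of the default-size logic in __init__ above: when
# attention_hidden_size / intermediate_size are left as None, they fall back to
# hidden_size and 4 * hidden_size respectively (values below are illustrative).
_demo_hidden = 4096
_demo_attn = None
_demo_inter = None
_demo_attn = _demo_attn if _demo_attn is not None else _demo_hidden
_demo_inter = _demo_inter if _demo_inter is not None else 4 * _demo_hidden
assert (_demo_attn, _demo_inter) == (4096, 16384)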
| 333 | 0 |
from ..utils import DummyObject, requires_backends
class snake_case_ ( metaclass=__A ):
__A : Any = ["transformers", "torch", "note_seq"]
def __init__( self : List[str] , *lowercase_ : Optional[int] , **lowercase_ : Optional[Any] ) -> str:
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def __UpperCamelCase ( cls : Dict , *lowercase_ : str , **lowercase_ : int ) -> Tuple:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def __UpperCamelCase ( cls : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : List[str] ) -> Optional[Any]:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 362 | class snake_case_ :
def __init__( self : int ) -> Optional[int]:
lowercase__ : Optional[int] = 0
lowercase__ : List[str] = 0
lowercase__ : Any = {}
def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] ) -> Union[str, Any]:
if vertex not in self.adjacency:
lowercase__ : List[Any] = {}
self.num_vertices += 1
def __UpperCamelCase ( self : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : str ) -> Optional[Any]:
self.add_vertex(lowercase_ )
self.add_vertex(lowercase_ )
if head == tail:
return
lowercase__ : int = weight
lowercase__ : Any = weight
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
lowercase__ : List[Any] = self.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : int = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase_ ) ):
lowercase__ : Tuple = list(edges[i] )
        edges.sort(key=lambda lowercase_ : lowercase_[2] )
for i in range(len(lowercase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowercase__ : int = edges[i][2] + 1
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : Optional[int] = edge
lowercase__ : Union[str, Any] = weight
lowercase__ : Dict = weight
def __str__( self : str ) -> Any:
lowercase__ : str = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowercase__ : Optional[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
lowercase__ : Any = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : List[str] ) -> Dict:
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict=None , lowercase_ : Any=None ) -> Optional[int]:
lowercase__ : Any = Graph()
if vertices is None:
lowercase__ : str = []
if edges is None:
lowercase__ : List[Any] = []
for vertex in vertices:
g.add_vertex(lowercase_ )
for edge in edges:
g.add_edge(*lowercase_ )
return g
class snake_case_ :
def __init__( self : int ) -> List[str]:
lowercase__ : Dict = {}
lowercase__ : Tuple = {}
def __len__( self : Union[str, Any] ) -> Union[str, Any]:
return len(self.parent )
def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] ) -> Tuple:
if item in self.parent:
return self.find(lowercase_ )
lowercase__ : Union[str, Any] = item
lowercase__ : int = 0
return item
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[str] ) -> Any:
if item not in self.parent:
return self.make_set(lowercase_ )
if item != self.parent[item]:
lowercase__ : Union[str, Any] = self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , lowercase_ : Dict , lowercase_ : str ) -> Optional[Any]:
lowercase__ : Dict = self.find(lowercase_ )
lowercase__ : Optional[int] = self.find(lowercase_ )
        if roota == rootb:
            return roota
        if self.rank[roota] > self.rank[rootb]:
            lowercase__ : Dict = roota
            return roota
        if self.rank[roota] < self.rank[rootb]:
            lowercase__ : int = rootb
            return rootb
        if self.rank[roota] == self.rank[rootb]:
            self.rank[roota] += 1
            lowercase__ : Tuple = roota
            return roota
return None
@staticmethod
def __UpperCamelCase ( lowercase_ : Dict ) -> Optional[Any]:
lowercase__ : List[Any] = graph.num_vertices
lowercase__ : Optional[Any] = Graph.UnionFind()
lowercase__ : int = []
while num_components > 1:
lowercase__ : List[Any] = {}
for vertex in graph.get_vertices():
lowercase__ : Any = -1
lowercase__ : List[str] = graph.get_edges()
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : str = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowercase__ , lowercase__ , lowercase__ : List[str] = edge
lowercase__ : List[str] = union_find.find(lowercase_ )
lowercase__ : Union[str, Any] = union_find.find(lowercase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : int = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowercase__ : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowercase__ , lowercase__ , lowercase__ : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase_ ) != union_find.find(lowercase_ ):
union_find.union(lowercase_ , lowercase_ )
mst_edges.append(cheap_edge[vertex] )
lowercase__ : Optional[Any] = num_components - 1
lowercase__ : List[Any] = Graph.build(edges=lowercase_ )
return mst
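# A tiny standalone union-find sketch illustrating the find/union mechanics
# that the Boruvka MST routine above relies on (names are illustrative; the
# class above additionally tracks ranks for union-by-rank):
_demo_parent = {}


def _demo_find(x):
    _demo_parent.setdefault(x, x)
    if _demo_parent[x] != x:
        _demo_parent[x] = _demo_find(_demo_parent[x])  # path compression
    return _demo_parent[x]


def _demo_union(x, y):
    _demo_parent[_demo_find(x)] = _demo_find(y)


_demo_union("a", "b")
_demo_union("c", "d")
assert _demo_find("a") == _demo_find("b") and _demo_find("a") != _demo_find("c")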
| 333 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=False):
lowercase__ : int = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias'''))
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
])
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowercase__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
])
return rename_keys
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=False):
for i in range(config.num_hidden_layers):
if base_model:
lowercase__ : Optional[int] = ""
else:
lowercase__ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Dict = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
lowercase__ : str = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowercase__ : List[str] = in_proj_weight[
: config.hidden_size, :
]
lowercase__ : Any = in_proj_bias[: config.hidden_size]
lowercase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : Dict = in_proj_bias[-config.hidden_size :]
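# Standalone sketch of the fused-QKV split performed above: timm stores a
# single (3 * hidden, hidden) input projection, which is sliced into equal
# query/key/value thirds along dim 0 (the hidden size below is illustrative).
_demo_hidden = 4
_demo_fused = torch.randn(3 * _demo_hidden, _demo_hidden)
_demo_q = _demo_fused[:_demo_hidden]
_demo_k = _demo_fused[_demo_hidden : 2 * _demo_hidden]
_demo_v = _demo_fused[-_demo_hidden:]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (_demo_hidden, _demo_hidden)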
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]):
lowercase__ : Any = dct.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
def lowercase_ ( ):
lowercase__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : List[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple):
lowercase__ : Optional[Any] = DeiTConfig()
# all deit models have fine-tuned heads
lowercase__ : List[str] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowercase__ : Optional[int] = 1000
lowercase__ : Dict = "huggingface/label-files"
lowercase__ : Dict = "imagenet-1k-id2label.json"
lowercase__ : Any = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset") , "r"))
lowercase__ : List[Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : List[str] = idalabel
lowercase__ : Any = {v: k for k, v in idalabel.items()}
lowercase__ : List[str] = int(deit_name[-6:-4])
lowercase__ : Dict = int(deit_name[-3:])
# size of the architecture
if deit_name[9:].startswith("tiny"):
lowercase__ : Union[str, Any] = 192
lowercase__ : Dict = 768
lowercase__ : Any = 12
lowercase__ : List[str] = 3
elif deit_name[9:].startswith("small"):
lowercase__ : Tuple = 384
lowercase__ : List[Any] = 1536
lowercase__ : List[Any] = 12
lowercase__ : str = 6
if deit_name[9:].startswith("base"):
pass
elif deit_name[4:].startswith("large"):
lowercase__ : Tuple = 1024
lowercase__ : str = 4096
lowercase__ : List[Any] = 24
lowercase__ : Optional[Any] = 16
# load original model from timm
lowercase__ : Any = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowercase__ : Optional[int] = timm_model.state_dict()
lowercase__ : List[str] = create_rename_keys(_lowerCamelCase , _lowerCamelCase)
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Union[str, Any] = DeiTForImageClassificationWithTeacher(_lowerCamelCase).eval()
model.load_state_dict(_lowerCamelCase)
# Check outputs on an image, prepared by DeiTImageProcessor
lowercase__ : str = int(
(256 / 224) * config.image_size) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowercase__ : Union[str, Any] = DeiTImageProcessor(size=_lowerCamelCase , crop_size=config.image_size)
lowercase__ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="pt")
lowercase__ : Optional[Any] = encoding["pixel_values"]
lowercase__ : int = model(_lowerCamelCase)
lowercase__ : List[str] = timm_model(_lowerCamelCase)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1E-3)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
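# Example invocation (a hypothetical sketch -- the script name and output
# folder are placeholders; the model name is the argparse default above):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path /tmp/deit_base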
| 363 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase : str):
lowercase__ : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase__ : str = 1024
lowercase__ : List[str] = 4096
lowercase__ : List[Any] = 24
lowercase__ : Dict = 16
lowercase__ : Union[str, Any] = [5, 11, 17, 23]
lowercase__ : Any = [256, 512, 1024, 1024]
lowercase__ : Optional[int] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase__ : Union[str, Any] = True
lowercase__ : Tuple = 150
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : str = "ade20k-id2label.json"
lowercase__ : Union[str, Any] = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset")) , "r"))
lowercase__ : Union[str, Any] = {int(_lowerCamelCase): v for k, v in idalabel.items()}
lowercase__ : Optional[Any] = idalabel
lowercase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowercase__ : Tuple = [1, 150, 480, 480]
return config, expected_shape
def lowercase_ ( _lowerCamelCase : List[Any]):
lowercase__ : int = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Tuple):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ : Dict = name.replace("pretrained.model" , "dpt.encoder")
if "pretrained.model" in name:
lowercase__ : List[str] = name.replace("pretrained.model" , "dpt.embeddings")
if "patch_embed" in name:
lowercase__ : Any = name.replace("patch_embed" , "patch_embeddings")
if "pos_embed" in name:
lowercase__ : Union[str, Any] = name.replace("pos_embed" , "position_embeddings")
if "attn.proj" in name:
lowercase__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense")
if "proj" in name and "project" not in name:
lowercase__ : int = name.replace("proj" , "projection")
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layer")
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense")
if "mlp.fc2" in name:
lowercase__ : Optional[int] = name.replace("mlp.fc2" , "output.dense")
if "norm1" in name:
lowercase__ : List[str] = name.replace("norm1" , "layernorm_before")
if "norm2" in name:
lowercase__ : Dict = name.replace("norm2" , "layernorm_after")
if "scratch.output_conv" in name:
lowercase__ : Union[str, Any] = name.replace("scratch.output_conv" , "head")
if "scratch" in name:
lowercase__ : str = name.replace("scratch" , "neck")
if "layer1_rn" in name:
lowercase__ : int = name.replace("layer1_rn" , "convs.0")
if "layer2_rn" in name:
lowercase__ : int = name.replace("layer2_rn" , "convs.1")
if "layer3_rn" in name:
lowercase__ : Tuple = name.replace("layer3_rn" , "convs.2")
if "layer4_rn" in name:
lowercase__ : Union[str, Any] = name.replace("layer4_rn" , "convs.3")
if "refinenet" in name:
lowercase__ : Dict = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ : str = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4)}''')
if "out_conv" in name:
lowercase__ : str = name.replace("out_conv" , "projection")
if "resConfUnit1" in name:
lowercase__ : int = name.replace("resConfUnit1" , "residual_layer1")
if "resConfUnit2" in name:
lowercase__ : Optional[Any] = name.replace("resConfUnit2" , "residual_layer2")
if "conv1" in name:
lowercase__ : List[Any] = name.replace("conv1" , "convolution1")
if "conv2" in name:
lowercase__ : Tuple = name.replace("conv2" , "convolution2")
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0")
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0")
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0")
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ : List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0")
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection")
if "pretrained.act_postprocess1.4" in name:
lowercase__ : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize")
if "pretrained.act_postprocess2.3" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection")
if "pretrained.act_postprocess2.4" in name:
lowercase__ : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize")
if "pretrained.act_postprocess3.3" in name:
lowercase__ : Dict = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection")
if "pretrained.act_postprocess4.3" in name:
lowercase__ : Any = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection")
if "pretrained.act_postprocess4.4" in name:
lowercase__ : int = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize")
if "pretrained" in name:
lowercase__ : Any = name.replace("pretrained" , "dpt")
if "bn" in name:
lowercase__ : str = name.replace("bn" , "batch_norm")
if "head" in name:
lowercase__ : Optional[Any] = name.replace("head" , "head.head")
if "encoder.norm" in name:
lowercase__ : Tuple = name.replace("encoder.norm" , "layernorm")
if "auxlayer" in name:
lowercase__ : int = name.replace("auxlayer" , "auxiliary_head.head")
return name
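# Quick check of the refinenet index remapping noted above (4 -> 0, 3 -> 1,
# 2 -> 2, 1 -> 3), i.e. abs(layer_idx - 4):
assert [abs(_i - 4) for _i in (4, 3, 2, 1)] == [0, 1, 2, 3]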
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str):
for i in range(config.num_hidden_layers):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
lowercase__ : Union[str, Any] = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Optional[int] = in_proj_bias[: config.hidden_size]
lowercase__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ : int = in_proj_bias[-config.hidden_size :]
def lowercase_ ( ):
lowercase__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : Optional[int] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase).raw)
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Dict):
lowercase__ , lowercase__ : Optional[int] = get_dpt_config(_lowerCamelCase)
# load original state_dict from URL
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu")
# remove certain keys
remove_ignore_keys_(_lowerCamelCase)
# rename keys
for key in state_dict.copy().keys():
lowercase__ : List[str] = state_dict.pop(_lowerCamelCase)
lowercase__ : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase)
# load HuggingFace model
lowercase__ : Any = DPTForSemanticSegmentation(_lowerCamelCase) if "ade" in checkpoint_url else DPTForDepthEstimation(_lowerCamelCase)
model.load_state_dict(_lowerCamelCase)
model.eval()
# Check outputs on an image
lowercase__ : Optional[Any] = 480 if "ade" in checkpoint_url else 384
lowercase__ : Union[str, Any] = DPTImageProcessor(size=_lowerCamelCase)
lowercase__ : List[str] = prepare_img()
lowercase__ : Dict = image_processor(_lowerCamelCase , return_tensors="pt")
# forward pass
lowercase__ : Tuple = model(**_lowerCamelCase).logits if "ade" in checkpoint_url else model(**_lowerCamelCase).predicted_depth
# Assert logits
lowercase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
if "ade" in checkpoint_url:
lowercase__ : List[str] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
assert outputs.shape == torch.Size(_lowerCamelCase)
assert (
torch.allclose(outputs[0, 0, :3, :3] , _lowerCamelCase , atol=1E-4)
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _lowerCamelCase)
)
Path(_lowerCamelCase).mkdir(exist_ok=_lowerCamelCase)
print(f'''Saving model to {pytorch_dump_folder_path}''')
model.save_pretrained(_lowerCamelCase)
print(f'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(_lowerCamelCase)
if push_to_hub:
print("Pushing model to hub...")
model.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowerCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowerCamelCase , _lowerCamelCase) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
UpperCamelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
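# Example invocation (a hypothetical sketch -- the checkpoint URL matches the
# argparse default above, while the script name and output folder are
# placeholders):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path /tmp/dpt_large \
#       --model_name dpt-large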
| 333 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class snake_case_ ( __A ):
def __get__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=None ) -> Optional[Any]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError("unreadable attribute" )
lowercase__ : Any = "__cached_" + self.fget.__name__
lowercase__ : Optional[int] = getattr(lowercase_ , lowercase_ , lowercase_ )
if cached is None:
lowercase__ : Optional[Any] = self.fget(lowercase_ )
setattr(lowercase_ , lowercase_ , lowercase_ )
return cached
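# A standalone sketch of the cached-property pattern implemented above:
# compute on first access, stash the result on the instance, reuse afterwards
# (fresh names are used so this runs independently of the obfuscated class):
class _DemoCachedProperty(property):
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


class _DemoHolder:
    def __init__(self):
        self.calls = 0

    @_DemoCachedProperty
    def answer(self):
        self.calls += 1
        return 42


_demo_holder = _DemoHolder()
assert (_demo_holder.answer, _demo_holder.answer, _demo_holder.calls) == (42, 42, 1)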
def lowercase_ ( _lowerCamelCase : Union[str, Any]) -> Dict:
lowercase__ : str = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''')
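# A self-contained restatement of the truthy/falsy string mapping above:
_DEMO_TRUTHY = {"y", "yes", "t", "true", "on", "1"}
_DEMO_FALSY = {"n", "no", "f", "false", "off", "0"}
for _demo_val, _demo_expected in [("Yes", 1), ("1", 1), ("off", 0), ("FALSE", 0)]:
    assert (1 if _demo_val.lower() in _DEMO_TRUTHY else 0) == _demo_expected
    assert (_demo_val.lower() in _DEMO_TRUTHY) != (_demo_val.lower() in _DEMO_FALSY)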
def lowercase_ ( _lowerCamelCase : Optional[int]) -> List[str]:
if is_torch_fx_proxy(_lowerCamelCase):
return True
if is_torch_available():
import torch
if isinstance(_lowerCamelCase , torch.Tensor):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(_lowerCamelCase , tf.Tensor):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(_lowerCamelCase , (jnp.ndarray, Tracer)):
return True
return isinstance(_lowerCamelCase , np.ndarray)
def lowercase_ ( _lowerCamelCase : Optional[Any]) -> List[Any]:
return isinstance(_lowerCamelCase , np.ndarray)
def lowercase_ ( _lowerCamelCase : Any) -> int:
return _is_numpy(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[int]) -> List[Any]:
import torch
return isinstance(_lowerCamelCase , torch.Tensor)
def lowercase_ ( _lowerCamelCase : Union[str, Any]) -> List[Any]:
return False if not is_torch_available() else _is_torch(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[int]) -> Any:
import torch
return isinstance(_lowerCamelCase , torch.device)
def lowercase_ ( _lowerCamelCase : List[Any]) -> Tuple:
return False if not is_torch_available() else _is_torch_device(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Optional[Any]) -> Optional[Any]:
import torch
if isinstance(_lowerCamelCase , _lowerCamelCase):
if hasattr(_lowerCamelCase , _lowerCamelCase):
lowercase__ : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase)
else:
return False
return isinstance(_lowerCamelCase , torch.dtype)
def lowercase_ ( _lowerCamelCase : int) -> Union[str, Any]:
return False if not is_torch_available() else _is_torch_dtype(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str) -> str:
import tensorflow as tf
return isinstance(_lowerCamelCase , tf.Tensor)
def lowercase_ ( _lowerCamelCase : List[Any]) -> Tuple:
return False if not is_tf_available() else _is_tensorflow(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : int) -> List[Any]:
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(_lowerCamelCase , "is_symbolic_tensor"):
return tf.is_symbolic_tensor(_lowerCamelCase)
return type(_lowerCamelCase) == tf.Tensor
def lowercase_ ( _lowerCamelCase : str) -> Dict:
return False if not is_tf_available() else _is_tf_symbolic_tensor(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict) -> str:
import jax.numpy as jnp # noqa: F811
return isinstance(_lowerCamelCase , jnp.ndarray)
def lowercase_ ( _lowerCamelCase : Any) -> List[Any]:
return False if not is_flax_available() else _is_jax(_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : Dict) -> Tuple:
if isinstance(_lowerCamelCase , (dict, UserDict)):
return {k: to_py_obj(_lowerCamelCase) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple)):
return [to_py_obj(_lowerCamelCase) for o in obj]
elif is_tf_tensor(_lowerCamelCase):
return obj.numpy().tolist()
elif is_torch_tensor(_lowerCamelCase):
return obj.detach().cpu().tolist()
elif is_jax_tensor(_lowerCamelCase):
return np.asarray(_lowerCamelCase).tolist()
elif isinstance(_lowerCamelCase , (np.ndarray, np.number)): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowercase_ ( _lowerCamelCase : Optional[int]) -> Optional[int]:
if isinstance(_lowerCamelCase , (dict, UserDict)):
return {k: to_numpy(_lowerCamelCase) for k, v in obj.items()}
elif isinstance(_lowerCamelCase , (list, tuple)):
return np.array(_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
return obj.numpy()
elif is_torch_tensor(_lowerCamelCase):
return obj.detach().cpu().numpy()
elif is_jax_tensor(_lowerCamelCase):
return np.asarray(_lowerCamelCase)
else:
return obj
class snake_case_ ( __A ):
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
lowercase__ : Union[str, Any] = fields(self )
# Safety and consistency checks
if not len(lowercase_ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
lowercase__ : List[Any] = getattr(self , class_fields[0].name )
lowercase__ : int = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowercase_ ):
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : Optional[int] = first_field.items()
lowercase__ : Optional[Any] = True
else:
try:
lowercase__ : str = iter(lowercase_ )
lowercase__ : Any = True
except TypeError:
lowercase__ : Optional[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowercase_ ):
if (
not isinstance(lowercase_ , (list, tuple) )
or not len(lowercase_ ) == 2
or not isinstance(element[0] , lowercase_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowercase__ : str = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowercase__ : Tuple = element[1]
elif first_field is not None:
lowercase__ : Optional[Any] = first_field
else:
for field in class_fields:
lowercase__ : List[Any] = getattr(self , field.name )
if v is not None:
lowercase__ : List[str] = v
def __delitem__( self : Optional[Any] , *lowercase_ : Optional[int] , **lowercase_ : Dict ) -> Optional[int]:
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __UpperCamelCase ( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ) -> Tuple:
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : Tuple , **lowercase_ : List[Any] ) -> Optional[int]:
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __UpperCamelCase ( self : List[str] , *lowercase_ : str , **lowercase_ : Dict ) -> str:
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[Any] , lowercase_ : List[str] ) -> Union[str, Any]:
if isinstance(lowercase_ , lowercase_ ):
lowercase__ : List[str] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> List[Any]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowercase_ , lowercase_ )
super().__setattr__(lowercase_ , lowercase_ )
def __setitem__( self : Union[str, Any] , lowercase_ : int , lowercase_ : str ) -> int:
# Will raise a KeyException if needed
super().__setitem__(lowercase_ , lowercase_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : str ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class snake_case_ ( __A ,__A ):
@classmethod
def __UpperCamelCase ( cls : int , lowercase_ : Dict ) -> Optional[Any]:
raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class snake_case_ ( __A ):
__A : str = "longest"
__A : Any = "max_length"
__A : List[Any] = "do_not_pad"
class snake_case_ ( __A ):
__A : Dict = "pt"
__A : str = "tf"
__A : Optional[int] = "np"
__A : List[Any] = "jax"
class snake_case_ :
def __init__( self : Dict , lowercase_ : List[ContextManager] ) -> Optional[Any]:
lowercase__ : str = context_managers
lowercase__ : List[Any] = ExitStack()
def __enter__( self : Dict ) -> List[Any]:
for context_manager in self.context_managers:
self.stack.enter_context(lowercase_ )
def __exit__( self : Tuple , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ) -> Optional[Any]:
self.stack.__exit__(*lowercase_ , **lowercase_ )
def lowercase_ ( _lowerCamelCase : Any) -> int:
lowercase__ : Optional[Any] = infer_framework(_lowerCamelCase)
if framework == "tf":
lowercase__ : List[str] = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
lowercase__ : List[str] = inspect.signature(model_class.forward) # PyTorch models
else:
lowercase__ : List[Any] = inspect.signature(model_class.__call__) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowercase_ ( _lowerCamelCase : Tuple) -> List[str]:
lowercase__ : Union[str, Any] = model_class.__name__
lowercase__ : Union[str, Any] = infer_framework(_lowerCamelCase)
if framework == "tf":
lowercase__ : Union[str, Any] = inspect.signature(model_class.call) # TensorFlow models
elif framework == "pt":
lowercase__ : Optional[int] = inspect.signature(model_class.forward) # PyTorch models
else:
lowercase__ : List[str] = inspect.signature(model_class.__call__) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowercase_ ( _lowerCamelCase : MutableMapping , _lowerCamelCase : str = "" , _lowerCamelCase : str = ".") -> List[Any]:
def _flatten_dict(_lowerCamelCase : Tuple , _lowerCamelCase : Any="" , _lowerCamelCase : int="."):
for k, v in d.items():
lowercase__ : Optional[Any] = str(_lowerCamelCase) + delimiter + str(_lowerCamelCase) if parent_key else k
if v and isinstance(_lowerCamelCase , _lowerCamelCase):
yield from flatten_dict(_lowerCamelCase , _lowerCamelCase , delimiter=_lowerCamelCase).items()
else:
yield key, v
return dict(_flatten_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase))
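# A standalone sketch of the nested-dict flattening above (delimiter "."):
def _demo_flatten(d, parent_key="", delimiter="."):
    out = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else k
        if isinstance(v, dict) and v:
            out.update(_demo_flatten(v, key, delimiter))
        else:
            out[key] = v
    return out


assert _demo_flatten({"a": {"b": 1, "c": {"d": 2}}, "e": 3}) == {"a.b": 1, "a.c.d": 2, "e": 3}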
@contextmanager
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : bool = False) -> List[Any]:
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : List[Any]=None) -> Optional[int]:
if is_numpy_array(_lowerCamelCase):
return np.transpose(_lowerCamelCase , axes=_lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.T if axes is None else array.permute(*_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.transpose(_lowerCamelCase , perm=_lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return jnp.transpose(_lowerCamelCase , axes=_lowerCamelCase)
else:
raise ValueError(f'''Type not supported for transpose: {type(_lowerCamelCase)}.''')
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple) -> Any:
if is_numpy_array(_lowerCamelCase):
return np.reshape(_lowerCamelCase , _lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.reshape(*_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.reshape(_lowerCamelCase , _lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return jnp.reshape(_lowerCamelCase , _lowerCamelCase)
else:
raise ValueError(f'''Type not supported for reshape: {type(_lowerCamelCase)}.''')
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=None) -> Tuple:
if is_numpy_array(_lowerCamelCase):
return np.squeeze(_lowerCamelCase , axis=_lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.squeeze() if axis is None else array.squeeze(dim=_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.squeeze(_lowerCamelCase , axis=_lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return jnp.squeeze(_lowerCamelCase , axis=_lowerCamelCase)
else:
raise ValueError(f'''Type not supported for squeeze: {type(_lowerCamelCase)}.''')
def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]) -> Optional[int]:
if is_numpy_array(_lowerCamelCase):
return np.expand_dims(_lowerCamelCase , _lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.unsqueeze(dim=_lowerCamelCase)
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.expand_dims(_lowerCamelCase , axis=_lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return jnp.expand_dims(_lowerCamelCase , axis=_lowerCamelCase)
else:
raise ValueError(f'''Type not supported for expand_dims: {type(_lowerCamelCase)}.''')
def lowercase_ ( _lowerCamelCase : int) -> str:
if is_numpy_array(_lowerCamelCase):
return np.size(_lowerCamelCase)
elif is_torch_tensor(_lowerCamelCase):
return array.numel()
elif is_tf_tensor(_lowerCamelCase):
import tensorflow as tf
return tf.size(_lowerCamelCase)
elif is_jax_tensor(_lowerCamelCase):
return array.size
else:
        raise ValueError(f'''Type not supported for tensor_size: {type(_lowerCamelCase)}.''')
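# A numpy-only smoke test of the semantics the dispatch helpers above are
# meant to provide (stated directly with np, which is imported in this module,
# since the obfuscated function names cannot be called unambiguously):
_demo_arr = np.arange(6).reshape(2, 3)
assert np.transpose(_demo_arr).shape == (3, 2)
assert np.reshape(_demo_arr, (3, 2)).shape == (3, 2)
assert np.squeeze(np.expand_dims(_demo_arr, 0)).shape == (2, 3)
assert np.size(_demo_arr) == 6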
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : int) -> Union[str, Any]:
for key, value in auto_map.items():
if isinstance(_lowerCamelCase , (tuple, list)):
lowercase__ : Optional[Any] = [f'''{repo_id}--{v}''' if (v is not None and "--" not in v) else v for v in value]
elif value is not None and "--" not in value:
lowercase__ : Any = f'''{repo_id}--{value}'''
return auto_map
def lowercase_ ( _lowerCamelCase : Union[str, Any]) -> Union[str, Any]:
for base_class in inspect.getmro(_lowerCamelCase):
lowercase__ : List[str] = base_class.__module__
lowercase__ : Any = base_class.__name__
if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("torch") or name == "PreTrainedModel":
return "pt"
elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''')
| 364 | def lowercase_ ( _lowerCamelCase : int = 1 , _lowerCamelCase : int = 1000):
lowercase__ : Union[str, Any] = 1
lowercase__ : int = 0
for divide_by_number in range(_lowerCamelCase , digit + 1):
lowercase__ : list[int] = []
lowercase__ : Dict = numerator
for _ in range(1 , digit + 1):
if now_divide in has_been_divided:
if longest_list_length < len(_lowerCamelCase):
lowercase__ : Union[str, Any] = len(_lowerCamelCase)
lowercase__ : Optional[int] = divide_by_number
else:
has_been_divided.append(_lowerCamelCase)
lowercase__ : Optional[Any] = now_divide * 10 % divide_by_number
return the_digit
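# A standalone sketch of the recurring-cycle idea used above: track remainders
# of the long division 1/d; the cycle length is the number of steps before a
# remainder repeats (0 means the decimal expansion terminates).
def _demo_cycle_length(d):
    seen, r = {}, 1
    while r and r not in seen:
        seen[r] = len(seen)
        r = r * 10 % d
    return 0 if r == 0 else len(seen) - seen[r]


assert _demo_cycle_length(7) == 6 and _demo_cycle_length(8) == 0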
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 0 |